/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
bool IsDeconv_ = false
>
class Conv3dFpropFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static bool const IsDeconv = IsDeconv_;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const &params_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic(
Params const &params,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
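// Traversal note (informal): advance() walks the filter in the nested order
// s -> r -> t, i.e. S is the fastest-varying index. Only after a full
// (T, R, S) sweep does the iterator jump to the next block of input channels
// by stepping filter_c_ by Shape::kRow * problem_size_.split_k_slices.
// For example, with T = R = S = 2 the visited (t, r, s) sequence is
// (0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1),
// after which the channel offset advances.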
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
return TensorCoord(k, filter_t_, filter_r_, filter_s_, filter_c_);
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
auto input_channels = (IsDeconv ? problem_size_.K : problem_size_.C);
auto output_channels = (IsDeconv ? problem_size_.C : problem_size_.K);
return coord.n() < output_channels &&
coord.c() < input_channels;
}
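// (Informal note: for forward propagation IsDeconv is false, so n() is bounded
// by the number of filters K and c() by the number of input channels C; when
// IsDeconv is true the roles of K and C are swapped.)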
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
auto input_channels = (IsDeconv ? problem_size.K : problem_size.C);
auto output_channels = (IsDeconv ? problem_size.C : problem_size.K);
// check alignment constraint on iterator's contiguous dimension
if (input_channels % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class ImplicitGemmMultistage :
public gemm::threadblock::MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = gemm::threadblock::MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
// Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical
// accuracy, where each mainloop iteration first accumulates into a temporary
// set of freshly-cleared accumulators, which are subsequently added to the
// final accumulator set.
static bool const kStagedAccumulation = arch::detail::UseStagedAccumulation<Operator>::value;
};
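// Worked example (informal, hypothetical numbers): if one stage of operand A
// needs AsyncCopyIterationsPerStageA = 8 cp.async instructions and
// Base::kWarpGemmIterations = 4, then kAccessesPerGroupA = (8 + 4 - 1) / 4 = 2,
// i.e. two cp.async issues are interleaved with each warp-level MMA iteration
// so the full stage has been issued by the end of the k-group loop.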
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
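// Worked example (informal, hypothetical WarpCount {kM = 2, kN = 2, kK = 1}):
// warp_idx = 3 gives warp_idx_mn = 3 % 4 = 3, warp_idx_k = 3 / 4 = 0,
// warp_idx_m = 3 % 2 = 1, warp_idx_n = 3 / 2 = 1, i.e. this warp owns the
// bottom-right warp tile of the threadblock tile and the only k-partition.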
CUTLASS_DEVICE
void copy_tiles_and_advance(
IteratorA &iterator_A, IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
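// Sizing note (informal, hypothetical operand type): for a 16-bit element with
// ThreadMap::kElementsPerAccess = 8 and kAccessesPerVector = 1, kSrcBytes =
// 16 * 8 / 1 / 8 = 16, so each cp_async_zfill above moves one 16-byte vector
// and zero-fills the destination when the source predicate (valid()) is false.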
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum,
///< number of iterations per channel
int gemm_k_iterations_per_channel = 0,
///< Imaginary strides used for planar-complex only - ignored here
int64_t imag_stride_A = 0,
int64_t imag_stride_B = 0) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
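// (Informal note: arch::cp_async_wait<N> blocks until at most N committed
// cp.async stages remain in flight, so waiting on kStages - 2 guarantees the
// oldest prefetched stage is resident in shared memory before the warp-level
// loads below.)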
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
// accumulator and this temporary accumulator is added to the final
// accumulator once in every mainloop iteration.
plus<FragmentC> plus_accum;
FragmentC tmp_accum;
if (Detail::kStagedAccumulation) {
tmp_accum.clear();
}
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping the k-group index
// back to 0 when this is the last group of the stage.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A,
group_start_iteration_B);
if (Detail::kStagedAccumulation) {
warp_mma(
tmp_accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
tmp_accum
);
if (warp_mma_k == 0) {
accum = plus_accum(accum, tmp_accum);
tmp_accum.clear();
}
} else {
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
}
if (warp_mma_k + 1 == Base::kWarpGemmIterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
}
}
}
if (Detail::kStagedAccumulation) {
accum = plus_accum(accum, tmp_accum);
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/conv/threadblock/implicit_gemm_multistage.h
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cute/layout.hpp"
#include "cute/util/type_traits.hpp"
#include "cute/arch/copy_sm90_tma.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::detail {
////////////////////////////////////////////////////////////////////////////////////////////////////
// For each cutlass::layout, provides its corresponding cute stride types, 64b by default
template <class L>
struct TagToStrideA {
using type = L;
};
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::RowMajor> {
using type = cute::Stride<int64_t, cute::Int<1>, int64_t>;
using tag = layout::RowMajor;
};
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::ColumnMajor> {
using type = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using tag = layout::ColumnMajor;
};
template <class L>
struct TagToStrideB {
using type = L;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::RowMajor> {
using type = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using tag = layout::RowMajor;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::ColumnMajor> {
using type = cute::Stride<int64_t, cute::Int<1>, int64_t>;
using tag = layout::ColumnMajor;
};
// For each cutlass::layout *, provides its corresponding cute stride types, 64b by default
// Used by pointer array and grouped gemm
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::RowMajor *> {
using UnderlyingType = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::RowMajor;
};
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::ColumnMajor *> {
using UnderlyingType = cute::Stride<cute::Int<1>, int64_t, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::ColumnMajor;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::RowMajor *> {
using UnderlyingType = cute::Stride<cute::Int<1>, int64_t, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::RowMajor;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::ColumnMajor *> {
using UnderlyingType = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::ColumnMajor;
};
// Maps to modes [M, N, L]
template <class LayoutTag>
struct TagToStrideC : TagToStrideA<LayoutTag> { };
// Conv: Maps to modes ((P,N), C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorNWC> {
using type = cute::Stride<cute::Stride<int64_t, int64_t>, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes (PN, C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorLinearizedNWC> {
using type = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes ((P,Q,N), C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorNHWC> {
using type = cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes (PQN, C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorLinearizedNHWC> {
using type = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes ((P,Q,Z,N), C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorNDHWC> {
using type = cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes (PQZN, C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorLinearizedNDHWC> {
using type = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes (K, (C,S), _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorKCS> {
using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t>, cute::Int<0>>;
};
// Conv: Maps to modes (K, (C,S,R), _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorKCSR> {
using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t>, cute::Int<0>>;
};
// Conv: Maps to modes (K, (C,S,R,T), _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorKCSRT> {
using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>, cute::Int<0>>;
};
// Convenience aliases
template<class LayoutTag>
using TagToStrideA_t = typename TagToStrideA<LayoutTag>::type;
template<class LayoutTag>
using TagToStrideB_t = typename TagToStrideB<LayoutTag>::type;
template<class LayoutTag>
using TagToStrideC_t = typename TagToStrideC<LayoutTag>::type;
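// Illustrative check (informal): per the specializations above, a row-major A
// tag maps to an [M, K, L] stride whose K mode is statically unit-stride.
static_assert(cute::is_same_v<
                  TagToStrideA_t<layout::RowMajor>,
                  cute::Stride<int64_t, cute::Int<1>, int64_t>>,
              "Row-major A is expected to be K-major (unit stride in K)");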
////////////////////////////////////////////////////////////////////////////////////////////////////
// For 2.x compatibility APIs, provide stride->layout tag mappers
template<int ModeIndex, class Stride>
constexpr bool
is_major(Stride = {}) {
// Account for stride types with and without batch mode and batch modes with static zero stride
return cute::is_constant<1, decltype(cute::front(cute::get<ModeIndex>(cute::remove_pointer_t<Stride>{})))>::value;
}
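// Example (informal): is_major<0>(cute::Stride<cute::Int<1>, int64_t, int64_t>{})
// is true because mode 0 carries a static unit stride, whereas the same query on
// cute::Stride<int64_t, cute::Int<1>, int64_t>{} is false since mode 0 has a
// dynamic stride.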
// Note : This method can be used for deducing the Layout Tag of A, C, D Matrices
template<class StrideA>
constexpr
auto
stride_to_layout_tag_A() {
if constexpr (is_major<0, StrideA>()) { // M major
return layout::ColumnMajor{};
}
else { // K major
return layout::RowMajor{};
}
CUTE_GCC_UNREACHABLE;
}
template<class StrideB>
constexpr
auto
stride_to_layout_tag_B() {
if constexpr (is_major<0, StrideB>()) { // N major
return layout::RowMajor{};
}
else { // K major
return layout::ColumnMajor{};
}
CUTE_GCC_UNREACHABLE;
}
template<class StrideC>
constexpr
auto
stride_to_layout_tag_C() {
if constexpr (is_major<0, StrideC>()) { // M major
return layout::ColumnMajor{};
}
else { // N major
return layout::RowMajor{};
}
CUTE_GCC_UNREACHABLE;
}
// Utilities to map Stride back on to their corresponding layout tags
template <class S>
struct StrideToLayoutTagA {
using type = decltype(detail::stride_to_layout_tag_A<S>());
};
template <class S>
struct StrideToLayoutTagB {
using type = decltype(detail::stride_to_layout_tag_B<S>());
};
template <class S>
struct StrideToLayoutTagC {
using type = decltype(detail::stride_to_layout_tag_C<S>());
};
// Convenience aliases
template<class S>
using StrideToLayoutTagA_t = typename StrideToLayoutTagA<S>::type;
template<class S>
using StrideToLayoutTagB_t = typename StrideToLayoutTagB<S>::type;
template<class S>
using StrideToLayoutTagC_t = typename StrideToLayoutTagC<S>::type;
////////////////////////////////////////////////////////////////////////////////////////////////////
// Inspects a tiled copy and determines whether its copy engine is TMA
template<class GmemTiledCopy>
constexpr bool is_tma_copy_engine() {
if constexpr (cute::is_void_v<GmemTiledCopy>) {
return false;
}
else {
if constexpr ( cute::is_base_of_v<cute::SM90_TMA_LOAD, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_LOAD_MULTICAST, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_LOAD_IM2COL, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_LOAD_IM2COL_MULTICAST, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_STORE, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_STORE_IM2COL, GmemTiledCopy>
) {
return true;
}
}
return false;
}
template <class X, class = void>
struct RawDtype { using type = X; };
template <class X>
struct RawDtype<X,cute::void_t<typename X::raw_type>> { using type = typename X::raw_type; };
// Inspects a TiledCopy and returns its alignment in terms of element count
template <class GmemTiledCopy, class Element, class ElementMma = Element>
constexpr int
get_alignment_count_from_gmem_tiled_copy() {
if constexpr (cute::is_void_v<GmemTiledCopy>) {
return 1;
}
// Account for ElementC = void kernels
else if constexpr (cute::is_void_v<Element>) {
return 0;
}
else {
// For TMA tiled copies, we know the alignment has to be 128 bits
if constexpr (is_tma_copy_engine<GmemTiledCopy>()) {
return 128 / sizeof_bits<Element>::value;
}
else {
// For non-TMA tiled copies, TiledCopy holds the alignment count directly in its TiledShape_MN
return GmemTiledCopy::NumValSrc;
}
}
}
// Return the shape that is associated with stride-1 mode, or 1 if not found
template<typename Shape, typename Stride>
CUTLASS_HOST_DEVICE constexpr
auto
get_contiguous_shape(Shape const & shape, Stride const & stride) {
using namespace cute;
auto idx = find_if(append(flatten(stride), _1{}), [](auto s){ return is_constant<1,decltype(s)>{}; });
return get<decltype(idx)::value>(append(flatten(shape), _1{}));
}
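// Example (informal): for a shape (M, N, L) with stride (_1, M, M*N) the
// stride-1 mode is mode 0, so this returns M; if no mode has a static unit
// stride, the appended sentinel _1 matches and the result is 1.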
// Check if tensor shape satisfies a given major alignment
template<int Alignment, class Shape, class Stride>
CUTLASS_HOST_DEVICE constexpr
bool
check_alignment(Shape const & shape, Stride const & stride) {
return is_major<0>(stride)
? get_contiguous_shape(cute::get<0>(shape), cute::get<0>(stride)) % Alignment == 0
: get_contiguous_shape(cute::get<1>(shape), cute::get<1>(stride)) % Alignment == 0;
}
// Compute the alignment implied by a swizzle (the extent over which its pattern repeats)
template<int B, int M, int S>
CUTLASS_HOST_DEVICE constexpr
size_t
alignment_for_swizzle(cute::Swizzle<B, M, S>) {
static_assert(B >= 0 and M >= 0);
return size_t(1) << size_t(B + M + cute::abs(S));
}
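// Example (informal): cute::Swizzle<3, 4, 3> yields 1 << (3 + 4 + 3) = 1024,
// i.e. the swizzle pattern repeats over a 1024-unit window (in whatever units
// the enclosing layout indexes), which is the alignment reported here.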
template<class Layout>
CUTLASS_HOST_DEVICE constexpr
size_t
alignment_for_swizzle(Layout layout) {
return alignment_for_swizzle(cute::detail::get_swizzle_portion(layout));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::detail
// End of file: include/cutlass/detail/layout.hpp
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Fusion callbacks specializations for the sm90 TMA warp-specialized (ws) epilogue
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/fusion/callbacks.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp"
#include "cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class NodeOp, class... ChildOps>
using Sm90EVT = Sm90TreeVisitor<NodeOp, ChildOps...>;
// D = alpha * acc
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class ElementOutput,
class ElementCompute,
class ElementScalar,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::ScaledAcc<ElementOutput, ElementCompute, ElementScalar, RoundStyle>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90EVT<Sm90Compute<multiplies, ElementOutput, ElementCompute, RoundStyle>,
Sm90ScalarBroadcast<ElementScalar>,
Sm90AccFetch
> {
using Impl =
Sm90EVT<Sm90Compute<multiplies, ElementOutput, ElementCompute, RoundStyle>,
Sm90ScalarBroadcast<ElementScalar>,
Sm90AccFetch
>;
using Operation = fusion::ScaledAcc<ElementOutput, ElementCompute, ElementScalar, RoundStyle>;
struct Arguments {
// Give a name and flat ordering to the fusion callback args
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
// Conversion to the args expected by the visitor implementation
// to_underlying_arguments will implicitly call this
operator typename Impl::Arguments() const {
return
{ // binary op : alpha * acc
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{} // binary args : multiplies
}; // end binary op
}
};
// Ctor inheritance
using Impl::Impl;
};
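// Usage sketch (informal, hypothetical values): the caller fills the flat
// Arguments and the conversion operator rebuilds the visitor-tree arguments,
// e.g. Arguments{/*alpha=*/2.0f} yields D = 2 * acc, while supplying alpha_ptr
// instead broadcasts a device-side scalar.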
/////////////////////////////////////////////////////////////////////////////////////////////////
// D = alpha * acc + beta * C
template<
class ElementOutput,
class ElementCompute,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinearCombination =
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc)
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementSource>, // C
Sm90EVT<Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch // acc
>
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class ElementOutput,
class ElementCompute,
class ElementSource,
class ElementScalar,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinearCombination<ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90LinearCombination<typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle> {
using Impl = Sm90LinearCombination<typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle>;
using Operation = fusion::LinearCombination<ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
operator typename Impl::Arguments() const {
return
{ // ternary op : beta * C + (alpha * acc)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // binary op : alpha * acc
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{} // binary args : multiplies
}, // end binary op
{} // ternary args : multiply_add
}; // end ternary op
}
};
// Ctor inheritance
using Impl::Impl;
};
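// Usage sketch (informal, hypothetical values): Arguments{/*alpha=*/1.0f,
// /*beta=*/0.5f} evaluates the tree above as D = 0.5 * C + (1.0 * acc); the
// nested braces in the conversion operator mirror that tree, with the
// innermost braces holding the leaf (scalar / source / accumulator) arguments.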
/////////////////////////////////////////////////////////////////////////////////////////////////
// D = activation(alpha * acc + beta * C)
template<
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombEltAct =
Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, // activation(beta * C + (alpha * acc))
Sm90LinearCombination<ElementCompute, ElementCompute, ElementSource, ElementScalar, RoundStyle> // beta * C + (alpha * acc)
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementSource,
class ElementScalar,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle> {
using Impl = Sm90LinCombEltAct<ActivationFn, typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle>;
using Operation = fusion::LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
operator typename Impl::Arguments() const {
return
{ // unary op: activation(beta * C + (alpha * acc))
{ // ternary op : beta * C + (alpha * acc)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // binary op : alpha * acc
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{} // binary args : multiplies
}, // end binary op
{} // ternary args : multiply_add
}, // end ternary op
activation // unary args: activation
}; // end unary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// D = alpha * acc + beta * C + per-row bias
template<
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombPerRowBias =
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias)
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementSource>, // C
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch, // acc
Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias
>
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class ElementOutput,
class ElementCompute,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentBias,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinCombPerRowBias<ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90LinCombPerRowBias<
CtaTileShapeMNK, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> {
using Impl = Sm90LinCombPerRowBias<
CtaTileShapeMNK, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>;
using Operation = fusion::LinCombPerRowBias<
ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
using StrideBias = Stride<_1,_0,int>;
ElementBias const* bias_ptr = nullptr;
StrideBias dBias = {};
operator typename Impl::Arguments() const {
return
{ // ternary op : beta * C + (alpha * acc + bias)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // ternary op : alpha * acc + bias
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}; // end ternary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// D = activation(alpha * acc + beta * C + per-row bias)
template<
class CtaTileShapeMNK,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombPerRowBiasEltAct =
Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>,
Sm90LinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentBias,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinCombPerRowBiasEltAct<
ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90LinCombPerRowBiasEltAct<
CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
> {
using Impl =
Sm90LinCombPerRowBiasEltAct<
CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
>;
using Operation =
fusion::LinCombPerRowBiasEltAct<
ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
using StrideBias = Stride<_1,_0,int>;
ElementBias const* bias_ptr = nullptr;
StrideBias dBias = {};
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
operator typename Impl::Arguments() const {
return
{ // unary op : activation(beta * C + (alpha * acc + bias))
{ // ternary op : beta * C + (alpha * acc + bias)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // ternary op : alpha * acc + bias
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}, // end ternary op
activation // unary args : activation
}; // end unary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// D = activation(alpha * acc + beta * C + per-row bias)
// Aux = alpha * acc + beta * C + per-row bias
template<
class CtaTileShapeMNK,
class EpilogueTile,
int Stages,
class StrideAux,
class SmemLayoutAtom,
class CopyOpR2S,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux = ElementOutput,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentAux = 128 / sizeof_bits_v<ElementAux>,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombPerRowBiasEltActAux =
Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>,
Sm90EVT<Sm90AuxStore<Stages, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>,
Sm90LinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>
>
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class GmemLayoutTagAux,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentAux,
int AlignmentBias,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile,
class SmemLayoutAtom,
class CopyOpR2S
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinCombPerRowBiasEltActAux<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile,
SmemLayoutAtom,
CopyOpR2S
> : Sm90LinCombPerRowBiasEltActAux<
CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
> {
using Impl =
Sm90LinCombPerRowBiasEltActAux<
CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>;
using Operation =
fusion::LinCombPerRowBiasEltActAux<
GmemLayoutTagAux, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
using StrideBias = Stride<_1,_0,int>;
ElementBias const* bias_ptr = nullptr;
StrideBias dBias = {};
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>;
ElementAux* aux_ptr = nullptr;
StrideAux dAux = {};
operator typename Impl::Arguments() const {
return
{ // unary op : activation(store(beta * C + (alpha * acc + bias)))
{ // unary op : store(beta * C + (alpha * acc + bias))
{ // ternary op : beta * C + (alpha * acc + bias)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // ternary op : alpha * acc + bias
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}, // end ternary op
{aux_ptr, dAux} // unary args : store
}, // end unary op
activation // unary args : activation
}; // end unary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// D = per-row alpha * acc + per-row beta * C + per-row bias
template<
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
int AlignmentScalar = 128 / sizeof_bits_v<ElementScalar>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90PerRowLinCombPerRowBias =
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias)
Sm90ColBroadcast<0, CtaTileShapeMNK, ElementScalar, Stride<_1,_0,int>, AlignmentScalar>, // beta
Sm90SrcFetch<ElementSource>, // C
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias
Sm90ColBroadcast<0, CtaTileShapeMNK, ElementScalar, Stride<_1,_0,int>, AlignmentScalar>, // alpha
Sm90AccFetch, // acc
Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias
>
>;
// D = activation(per-row alpha * acc + per-row beta * C + per-row bias)
template<
class CtaTileShapeMNK,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
int AlignmentScalar = 128 / sizeof_bits_v<ElementScalar>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90PerRowLinCombPerRowBiasEltAct =
Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>,
Sm90PerRowLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute,
ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle>
>;
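// Illustrative note (aliases and stride values are assumptions): the per-row alpha/beta/bias
// operands above are column broadcasts over the M mode, i.e. length-M vectors read with
// Stride<_1,_0,int> (unit stride in M, broadcast over N, integer batch stride in L). A null
// pointer is expected to fall back to the accompanying scalar, which is passed as the
// broadcast's null_default in the Arguments conversion below. For example:
//
//   typename Callbacks::Arguments args;   // Callbacks is an assumed alias
//   args.alpha_ptr = d_alpha;             // length-M device vector of per-row alphas
//   args.dAlpha    = {_1{}, _0{}, 0};     // no batching in this example
//   args.beta_ptr  = nullptr;             // rows share the scalar args.beta instead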
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentBias,
int AlignmentScalar,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::PerRowLinCombPerRowBiasEltAct<
ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90PerRowLinCombPerRowBiasEltAct<
CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle
> {
using Impl =
Sm90PerRowLinCombPerRowBiasEltAct<
CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle
>;
using Operation =
fusion::PerRowLinCombPerRowBiasEltAct<
ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle
>;
struct Arguments {
using StrideAlpha = Stride<_1,_0,int>;
using StrideBeta = Stride<_1,_0,int>;
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
StrideAlpha dAlpha = {};
StrideBeta dBeta = {};
using StrideBias = Stride<_1,_0,int>;
ElementBias const* bias_ptr = nullptr;
StrideBias dBias = {};
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
operator typename Impl::Arguments() const {
return
{ // unary op : activation(beta * C + (alpha * acc + bias))
{ // ternary op : beta * C + (alpha * acc + bias)
{beta_ptr, beta, dBeta}, // leaf args : beta
{}, // leaf args : C
{ // ternary op : alpha * acc + bias
{alpha_ptr, alpha, dAlpha}, // leaf args : alpha
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}, // end ternary op
activation // unary args : activation
}; // end unary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename T>
constexpr bool is_fp8_v = cute::is_same_v<T,float_e4m3_t> || cute::is_same_v<T,float_e5m2_t>;
// We only apply the scaling factor if output is fp8
template <typename ElementOutput>
struct ScaleOutOp { template <typename T> using Op = cutlass::first<T>; };
template <>
struct ScaleOutOp<float_e4m3_t> { template <typename T> using Op = cutlass::multiplies<T>; };
template <>
struct ScaleOutOp<float_e5m2_t> { template <typename T> using Op = cutlass::multiplies<T>; };
template <typename T>
template <typename T>
using amax = cutlass::maximum_absolute_value_reduction<T, true>; // propagate NaNs
}; // end namespace detail
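// Illustrative sketch of the dispatch above (element types are assumptions): ScaleOutOp picks
// the binary op that combines activation(Z) with scale_d, so only fp8 outputs pay for the
// extra multiply.
//
//   detail::ScaleOutOp<float_e4m3_t>::Op<float> -> cutlass::multiplies<float>  (D is scaled)
//   detail::ScaleOutOp<half_t>::Op<float>       -> cutlass::first<float>       (scale_d ignored)
//   detail::amax<float>                         -> maximum_absolute_value_reduction, NaN-propagating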
// D = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
template<
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90ScaledLinCombPerRowBias =
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias)
Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, 2>, // scale_c * beta
Sm90SrcFetch<ElementSource>, // C
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias
Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, 3>, // scale_a * scale_b * alpha
Sm90AccFetch, // acc
Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias
>
>;
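// Illustrative note: Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, N> broadcasts the
// (by default multiplicative) reduction of its N scalar operands, so the node above with
// N = 2 carries scale_c * beta and the node with N = 3 carries scale_a * scale_b * alpha.
// Each operand may be an immediate value or a device pointer, supplied as (values..., pointers...):
//
//   {{scale_c, beta}, {scale_c_ptr, beta_ptr}}                          // scale_c * beta
//   {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr}}  // scale_a * scale_b * alpha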
// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias
// if D is fp8
// D = scale_d * activation(Z)
// else
// D = activation(Z)
template<
class CtaTileShapeMNK,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90ScaledLinCombPerRowBiasEltAct =
Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d
Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z)
// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias
Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>
>,
Sm90ScalarBroadcast<ElementScalar> // scale_d
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentBias,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::ScaledLinCombPerRowBiasEltAct<
ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile
> : Sm90ScaledLinCombPerRowBiasEltAct<
CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
> {
using Impl =
Sm90ScaledLinCombPerRowBiasEltAct<
CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
>;
using Operation =
fusion::ScaledLinCombPerRowBiasEltAct<
ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle
>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
ElementScalar scale_a = ElementScalar(1);
ElementScalar scale_b = ElementScalar(1);
ElementScalar scale_c = ElementScalar(1);
ElementScalar scale_d = ElementScalar(1);
ElementScalar const* scale_a_ptr = nullptr;
ElementScalar const* scale_b_ptr = nullptr;
ElementScalar const* scale_c_ptr = nullptr;
ElementScalar const* scale_d_ptr = nullptr;
using StrideBias = Stride<_1,_0,int>;
ElementBias const* bias_ptr = nullptr;
StrideBias dBias = {};
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
operator typename Impl::Arguments() const {
return
{ // binary op : activation((scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)) * scale_d
{ // unary op : activation((scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias))
{ // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)
{{scale_c, beta},
{scale_c_ptr, beta_ptr}
}, // leaf args : (scale_c * beta)
{}, // leaf args : C
{ // ternary op : (scale_a * scale_b * alpha) * acc + bias
{{scale_a, scale_b, alpha},
{scale_a_ptr, scale_b_ptr, alpha_ptr}
}, // leaf args : (scale_a * scale_b * alpha)
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}, // end ternary op
activation // unary args : activation
}, // end unary op
{{scale_d},
{scale_d_ptr}
}, // leaf args : scale_d
{} // binary args : multiplies or first
}; // end binary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
// if D is fp8
// amax_d = max(abs(elements in activation(Z)))
// D = scale_d * activation(Z)
// else
// D = activation(Z)
// if Aux is fp8
// amax_aux = max(abs(elements in Z))
// Aux = scale_aux * Z
// else
// Aux = Z
// fp8 aux specialization
template<
class CtaTileShapeMNK,
class EpilogueTile,
int StagesD,
class StrideAux,
class SmemLayoutAtom,
class CopyOpR2S,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux = ElementOutput,
class ElementAmax = ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentAux = 128 / sizeof_bits_v<ElementAux>,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8 =
Sm90SplitTreeVisitor<
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>,
// D = activation(Z) * scale_d, amax_d = max(abs(elements in D))
Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d
Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_d
Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z)
Sm90SplitTreeFetch // Z
>
>,
Sm90ScalarBroadcast<ElementScalar> // scale_d
>,
// Aux = Z * scale_aux, amax_aux = max(abs(elements in Aux))
Sm90EVT<Sm90AuxStore<StagesD, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, // store(Aux)
Sm90EVT<Sm90Compute<cutlass::multiplies, ElementCompute, ElementCompute, RoundStyle>, // Z * scale_aux
Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_aux
Sm90SplitTreeFetch // Z
>,
Sm90ScalarBroadcast<ElementScalar> // scale_aux
>
>
>;
// non-fp8 aux specialization
// lets us use some EVT specializations such as relu + uint1b_t aux
template<
class CtaTileShapeMNK,
class EpilogueTile,
int StagesD,
class StrideAux,
class SmemLayoutAtom,
class CopyOpR2S,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux = ElementOutput,
class ElementAmax = ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentAux = 128 / sizeof_bits_v<ElementAux>,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8 =
// D = activation(Z) * scale_d, amax_d = max(abs(elements in D))
Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d
Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_d
Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z)
Sm90EVT<Sm90AuxStore<StagesD, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, // Aux = Z
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
          Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>
>
>
>,
Sm90ScalarBroadcast<ElementScalar> // scale_d
>;
// dispatcher
template<
class CtaTileShapeMNK,
class EpilogueTile,
int StagesD,
class StrideAux,
class SmemLayoutAtom,
class CopyOpR2S,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux = ElementOutput,
class ElementAmax = ElementCompute,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentAux = 128 / sizeof_bits_v<ElementAux>,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90ScaledLinCombPerRowBiasEltActAmaxAux = conditional_t<detail::is_fp8_v<ElementAux>,
Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8<
CtaTileShapeMNK, EpilogueTile, StagesD, StrideAux, SmemLayoutAtom, CopyOpR2S, ActivationFn,
    ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>,
Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8<
CtaTileShapeMNK, EpilogueTile, StagesD, StrideAux, SmemLayoutAtom, CopyOpR2S, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>
>;
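// Illustrative dispatch examples (element types are assumptions):
//   ElementAux = float_e4m3_t -> Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8    (split-tree DAG: Aux gets its own scale/amax pass)
//   ElementAux = half_t       -> Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8 (single tree: Aux stores Z directly)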
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class GmemLayoutTagAux,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux,
class ElementAmax,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentAux,
int AlignmentBias,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile,
class SmemLayoutAtom,
class CopyOpR2S
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile,
SmemLayoutAtom,
CopyOpR2S
> : Sm90ScaledLinCombPerRowBiasEltActAmaxAux<
CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>,
SmemLayoutAtom, CopyOpR2S, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
> {
using Impl =
Sm90ScaledLinCombPerRowBiasEltActAmaxAux<
CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>,
SmemLayoutAtom, CopyOpR2S, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>;
using Operation =
fusion::ScaledLinCombPerRowBiasEltActAmaxAux<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
ElementScalar scale_a = ElementScalar(1);
ElementScalar scale_b = ElementScalar(1);
ElementScalar scale_c = ElementScalar(1);
ElementScalar scale_d = ElementScalar(1);
ElementScalar const* scale_a_ptr = nullptr;
ElementScalar const* scale_b_ptr = nullptr;
ElementScalar const* scale_c_ptr = nullptr;
ElementScalar const* scale_d_ptr = nullptr;
ElementScalar scale_aux = ElementScalar(1);
ElementScalar const* scale_aux_ptr = nullptr;
using StrideBias = Stride<_1,_0,int>;
ElementBias const* bias_ptr = nullptr;
StrideBias dBias = {};
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
ElementAmax* amax_D_ptr = nullptr;
ElementAmax* amax_aux_ptr = nullptr;
using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>;
ElementAux* aux_ptr = nullptr;
StrideAux dAux = {};
operator typename Impl::Arguments() const {
// Only compute amax_d if D is fp8
ElementAmax* amax_D_ptr_ = nullptr;
if constexpr (detail::is_fp8_v<ElementOutput>) {
amax_D_ptr_ = amax_D_ptr;
}
// Aux is fp8 -> DAG arguments
if constexpr (detail::is_fp8_v<ElementAux>) {
typename Impl::Arguments args;
// always use structured binding to unpack DAG args since it may or may not be a tuple
auto& [Z_args, aux_args, D_args] = args;
Z_args =
{ // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)
{{scale_c, beta},
{scale_c_ptr, beta_ptr}
}, // leaf args : (scale_c * beta)
{}, // leaf args : C
{ // ternary op : (scale_a * scale_b * alpha) * acc + bias
{{scale_a, scale_b, alpha},
{scale_a_ptr, scale_b_ptr, alpha_ptr}
}, // leaf args : (scale_a * scale_b * alpha)
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}; // end ternary op
D_args =
{ // binary op : activation(Z) * scale_d or activation(Z)
{ // unary op : reduce(activation(Z))
{ // unary op : activation(Z)
{}, // leaf args : Z
activation // unary args : activation
}, // end unary op
{amax_D_ptr_} // unary args : reduce
}, // end unary op
{{scale_d},
{scale_d_ptr}
}, // leaf args : scale_d
{} // binary args : multiplies or first
}; // end binary op
aux_args =
{ // unary op : store(Aux)
{ // binary op : Z * scale_d or Z
{ // unary op : reduce(Z)
{}, // leaf args : Z
{amax_aux_ptr} // unary args : reduce
}, // end unary op
{{scale_aux},
{scale_aux_ptr}
            }, // leaf args : scale_aux
{} // binary args : multiplies
}, // end binary op
{aux_ptr, dAux} // unary args : store
}; // end unary op
return args;
}
// Aux is not fp8 -> Tree arguments
else {
return
{ // binary op : activation(Z) * scale_d or activation(Z)
{ // unary op : reduce(activation(Z))
{ // unary op : activation(Z)
{ // unary op : store(Z)
{ // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)
{{scale_c, beta},
{scale_c_ptr, beta_ptr}
}, // leaf args : (scale_c * beta)
{}, // leaf args : C
{ // ternary op : (scale_a * scale_b * alpha) * acc + bias
{{scale_a, scale_b, alpha},
{scale_a_ptr, scale_b_ptr, alpha_ptr}
}, // leaf args : (scale_a * scale_b * alpha)
{}, // leaf args : acc
{bias_ptr, ElementBias(0), dBias
}, // leaf args : bias
{} // ternary args : multiply_add
}, // end ternary op
{} // ternary args : multiply_add
}, // end ternary op
{aux_ptr, dAux} // unary args : store
}, // end unary op
activation // unary args : activation
}, // end unary op
{amax_D_ptr_} // unary args : reduce
}, // end unary op
{{scale_d},{scale_d_ptr}}, // leaf args : scale_d
{} // binary args : multiplies or first
}; // end binary op
}
}
};
// Ctor inheritance
using Impl::Impl;
};
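// Illustrative sketch (aliases and device pointers are assumptions): typical fp8 usage fills
// the tensor-wise scales and the amax output locations; the conversion operator above then
// selects DAG or tree arguments depending on whether ElementAux is fp8.
//
//   typename Callbacks::Arguments args;   // Callbacks is an assumed alias
//   args.scale_a_ptr  = d_scale_a;   args.scale_b_ptr = d_scale_b;
//   args.scale_c_ptr  = d_scale_c;   args.scale_d_ptr = d_scale_d;
//   args.amax_D_ptr   = d_amax_d;    // only written when ElementOutput is fp8
//   args.amax_aux_ptr = d_amax_aux;  // only used when ElementAux is fp8
//   args.aux_ptr      = d_aux;       args.dAux = dAux;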
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class CtaTileShapeMNK,
class EpilogueTile,
int Stages,
class StrideAux,
class SmemLayoutAtom,
class CopyOpS2R,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentAux = 128 / sizeof_bits_v<ElementAux>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombDeEltAct =
Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, // activation(beta * C + (alpha * acc), aux)
Sm90LinearCombination<ElementCompute, ElementCompute, ElementSource, ElementScalar, RoundStyle>, // beta * C + (alpha * acc)
Sm90AuxLoad<Stages, EpilogueTile, ElementAux, StrideAux, SmemLayoutAtom, CopyOpS2R, AlignmentAux> // aux
>;
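// Illustrative note (use case is an assumption): unlike the trees above, the activation node
// here is binary -- it receives both the linear combination and the loaded Aux tensor, e.g. a
// backward-pass elementwise op such as dReLU that gates the result by the saved aux data:
//
//   D[i] = ActivationFn(beta * C[i] + alpha * acc[i], Aux[i])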
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class GmemLayoutTagAux,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux,
class ElementSource,
class ElementScalar,
int AlignmentAux,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile,
class SmemLayoutAtom,
class CopyOpS2R
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinCombDeEltAct<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile,
SmemLayoutAtom,
CopyOpS2R
> : Sm90LinCombDeEltAct<
CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle
> {
using Impl =
Sm90LinCombDeEltAct<
CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle
>;
using Operation =
fusion::LinCombDeEltAct<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle
>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>;
ElementAux const* aux_ptr = nullptr;
StrideAux dAux = {};
operator typename Impl::Arguments() const {
return
{ // binary op : activation(beta * C + (alpha * acc), aux)
{ // ternary op : beta * C + (alpha * acc)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // binary op : alpha * acc
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{} // binary args : multiplies
}, // end binary op
{} // ternary args : multiply_add
}, // end ternary op
{aux_ptr, ElementAux(0), dAux}, // leaf args : aux
activation // binary args : activation
}; // end binary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class CtaTileShapeMNK,
class EpilogueTile,
int Stages,
class StrideAux,
class SmemLayoutAtom,
class CopyOpS2R,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux = ElementOutput,
class ElementBias = ElementOutput,
class ElementSource = ElementOutput,
class ElementScalar = ElementCompute,
int AlignmentAux = 128 / sizeof_bits_v<ElementAux>,
int AlignmentBias = 128 / sizeof_bits_v<ElementBias>,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombDeEltActDePerRowBias =
Sm90EVT<Sm90Compute<cutlass::epilogue::thread::Identity, ElementOutput, ElementCompute, RoundStyle>, // Identity for final conversion
Sm90EVT<Sm90ColReduction<plus, plus, plus, 0, CtaTileShapeMNK,
ElementBias, ElementCompute, RoundStyle, Stride<_1,_0,int>, AlignmentBias>,
Sm90LinCombDeEltAct<CtaTileShapeMNK, EpilogueTile, Stages, StrideAux, SmemLayoutAtom, CopyOpS2R, ActivationFn,
ElementCompute, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle>
>
>;
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class GmemLayoutTagAux,
template <class> class ActivationFn,
class ElementOutput,
class ElementCompute,
class ElementAux,
class ElementBias,
class ElementSource,
class ElementScalar,
int AlignmentAux,
int AlignmentBias,
FloatRoundStyle RoundStyle,
class CtaTileShapeMNK,
class EpilogueTile,
class SmemLayoutAtom,
class CopyOpS2R
>
struct FusionCallbacks<
epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
fusion::LinCombDeEltActDePerRowBias<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>,
CtaTileShapeMNK,
EpilogueTile,
SmemLayoutAtom,
CopyOpS2R
> : Sm90LinCombDeEltActDePerRowBias<
CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
> {
using Impl =
Sm90LinCombDeEltActDePerRowBias<
CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn,
ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>;
using Operation =
fusion::LinCombDeEltActDePerRowBias<
GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute,
ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle
>;
struct Arguments {
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
ElementScalar const* alpha_ptr = nullptr;
ElementScalar const* beta_ptr = nullptr;
using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments;
ActivationArguments activation = ActivationArguments();
using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>;
ElementAux const* aux_ptr = nullptr;
StrideAux dAux = {};
using StrideBias = Stride<_1,_0,int>;
ElementBias* dbias_ptr = nullptr;
StrideBias dDbias = {};
operator typename Impl::Arguments() const {
return
{ // unary op : identity/convert
{ // unary op : reduce(activation(beta * C + (alpha * acc), aux))
{ // binary op : activation(beta * C + (alpha * acc), aux)
{ // ternary op : beta * C + (alpha * acc)
{{beta}, {beta_ptr}}, // leaf args : beta
{}, // leaf args : C
{ // binary op : alpha * acc
{{alpha}, {alpha_ptr}}, // leaf args : alpha
{}, // leaf args : acc
{} // binary args : multiplies
}, // end binary op
{} // ternary args : multiply_add
}, // end ternary op
{aux_ptr, ElementAux(0), dAux}, // leaf args : aux
activation // binary args : activation
}, // end binary op
{dbias_ptr, ElementCompute(0), dDbias} // unary args : reduce
}, // end unary op
{} // unary args : identity/convert
}; // end unary op
}
};
// Ctor inheritance
using Impl::Impl;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <class FusionOpOrCallbacks, class = cute::void_t<>>
struct get_element_aux {
using type = void;
};
template <class FusionOpOrCallbacks>
struct get_element_aux<FusionOpOrCallbacks, cute::void_t<typename FusionOpOrCallbacks::ElementAux>> {
using type = typename FusionOpOrCallbacks::ElementAux;
};
template <class NodeOp, class... ChildOps>
struct get_element_aux<Sm90TreeVisitor<NodeOp, ChildOps...>, cute::void_t<>> {
using type = typename get_element_aux<NodeOp>::type;
};
template <class... Ts>
struct get_element_aux<FusionCallbacks<Ts...>, cute::void_t<typename FusionCallbacks<Ts...>::Operation>> {
private:
using Operation = typename FusionCallbacks<Ts...>::Operation;
public:
using type = typename get_element_aux<Operation>::type;
};
} // namespace cutlass::epilogue::fusion::detail
template <class Callbacks>
using get_element_aux_t = typename detail::get_element_aux<Callbacks>::type;
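// Illustrative resolution examples (operations are assumptions): get_element_aux_t walks
// FusionCallbacks -> Operation (or the visitor tree) and yields the aux element type, or void
// when the fusion has no aux tensor.
//
//   get_element_aux_t<Callbacks with Operation = LinCombPerRowBiasEltActAux<..., ElementAux = float_e4m3_t, ...>>
//     -> float_e4m3_t   // the operation exposes ElementAux
//   get_element_aux_t<Callbacks with Operation = LinearCombination<...>>
//     -> void           // no aux tensor in the fusion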
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 21890
} | 30 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations with a generic element-wise activation
function. Scaling factors are applied to operands A, B, and C. The pre-activation auxiliary
output is also returned.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/thread/linear_combination_generic.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias
/// D = activation(Aux)
///
template <
template<typename T> class ActivationFunctor,
typename ElementOutput_, ///< Data type used to load and store tensors
typename ElementAuxOutput_, ///< Data type used to store auxiliary output
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                                       ///< but 64 or 32 is sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
bool IsHeavy = false
>
class LinearCombinationGenericWithScalingAndAbsMax {
public:
using ElementOutput = ElementOutput_;
using ElementAuxOutput = ElementAuxOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementScalingFactor = ElementAccumulator_;
/// Data type used for absolute maximum value
using ElementAbsmax = float;
static bool const kIsScalingAndAmaxAuxOutputNeeded = (platform::is_same<ElementAuxOutput, cutlass::float_e4m3_t>::value ||
platform::is_same<ElementAuxOutput, cutlass::float_e5m2_t>::value);
static bool const kIsScalingAndAmaxOutputNeeded = (platform::is_same<ElementOutput, cutlass::float_e4m3_t>::value ||
platform::is_same<ElementOutput, cutlass::float_e5m2_t>::value);
static bool const kIsHeavy = IsHeavy;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAuxOutput = Array<ElementAuxOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
struct ActivationParams
: LinearCombinationGenericParams<ElementCompute>,
GenericActivationTraits<ActivationFunctor<ElementCompute>>::Arguments {
using LinearCombinationGenericParams<ElementCompute>::LinearCombinationGenericParams;
};
ActivationParams activation;
ElementScalingFactor const* scale_a_ptr = nullptr; ///< pointer to a scalar - if not null, loads it from memory
ElementScalingFactor const* scale_b_ptr = nullptr; ///< pointer to b scalar - if not null, loads it from memory
ElementScalingFactor const* scale_c_ptr = nullptr; ///< pointer to c scalar - if not null, loads it from memory
ElementScalingFactor const* scale_d_ptr = nullptr; ///< pointer to d scalar - if not null, loads it from memory
ElementScalingFactor const* scale_aux_ptr = nullptr; ///< pointer to aux scalar - if not null, loads it from memory
ElementAbsmax * abs_max_aux_ptr = nullptr; ///< pointer to location to store amax of Aux
ElementAbsmax * abs_max_D_ptr = nullptr; ///< pointer to location to store amax of D
CUTLASS_HOST_DEVICE
Params() :
scale_a_ptr(nullptr),
scale_b_ptr(nullptr),
scale_c_ptr(nullptr),
scale_d_ptr(nullptr),
scale_aux_ptr(nullptr),
abs_max_aux_ptr(nullptr),
abs_max_D_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ActivationParams activation_params,
ElementScalingFactor const* scale_a_ptr,
ElementScalingFactor const* scale_b_ptr,
ElementScalingFactor const* scale_c_ptr,
ElementScalingFactor const* scale_d_ptr,
ElementScalingFactor const* scale_aux_ptr,
ElementAbsmax * abs_max_aux_ptr,
ElementAbsmax * abs_max_D_ptr) :
activation(activation_params),
scale_a_ptr(scale_a_ptr),
scale_b_ptr(scale_b_ptr),
scale_c_ptr(scale_c_ptr),
scale_d_ptr(scale_d_ptr),
scale_aux_ptr(scale_aux_ptr),
abs_max_aux_ptr(abs_max_aux_ptr),
abs_max_D_ptr(abs_max_D_ptr) {}
};
private:
//
// Data members
//
Params params_;
bool skip_elementwise_;
// Scaling factors for output and auxiliary output
ElementCompute scale_d_;
ElementCompute scale_aux_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationGenericWithScalingAndAbsMax(Params const ¶ms) :
params_(params),
skip_elementwise_(false),
scale_d_(ElementCompute(params.scale_d_ptr ? *(params.scale_d_ptr) : ElementScalingFactor(1))),
scale_aux_(ElementCompute(params.scale_aux_ptr ? *(params.scale_aux_ptr) : ElementScalingFactor(1)))
{
params_.activation.alpha = (params.activation.alpha_ptr ? *params.activation.alpha_ptr : params.activation.alpha);
params_.activation.beta = (params.activation.beta_ptr ? *params.activation.beta_ptr : params.activation.beta);
auto scale_a =
ElementCompute(params.scale_a_ptr ? *(params.scale_a_ptr) : ElementScalingFactor(1));
auto scale_b =
ElementCompute(params.scale_b_ptr ? *(params.scale_b_ptr) : ElementScalingFactor(1));
auto scale_c =
ElementCompute(params.scale_c_ptr ? *(params.scale_c_ptr) : ElementScalingFactor(1));
multiplies<ElementCompute> multiply;
params_.activation.alpha = multiply(params.activation.alpha, multiply(scale_a, scale_b));
params_.activation.beta = multiply(params.activation.beta, scale_c);
}
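  // Illustrative numeric example (values assumed): with alpha = 1, beta = 1, scale_a = 0.5,
  // scale_b = 4 and scale_c = 0.25, the constructor above folds the scales once so that
  //   params_.activation.alpha = 1 * (0.5 * 4) = 2
  //   params_.activation.beta  = 1 * 0.25      = 0.25
  // and the per-element operators below only evaluate alpha' * acc + beta' * src + bias.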
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return params_.activation.beta != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
params_.activation.beta = ElementCompute(1);
}
// Only the final partition should perform the activation function
// and scale the output and auxiliary output values.
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
scale_d_ = ElementCompute(1.);
scale_aux_ = ElementCompute(1.);
}
}
/// Computes linear scaling:
/// Aux = (alpha * scale_a * scale_b * accumulator) + (beta * scale_c * source) + bias
/// D = activation(Aux)
CUTLASS_HOST_DEVICE
void operator()(
FragmentCompute& output,
FragmentCompute& aux_output,
FragmentAccumulator const &accumulator,
FragmentCompute const& bias,
FragmentOutput const &source) {
    // Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> multiply;
plus<FragmentCompute> add;
multiply_add<FragmentCompute> mul_add_accumulator;
ActivationFunctor<FragmentCompute> activation;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(params_.activation.alpha, converted_accumulator, intermediate);
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = multiply(params_.activation.beta, converted_source);
intermediate = mul_add_accumulator(params_.activation.alpha, converted_accumulator, intermediate);
}
intermediate = add(intermediate, bias);
aux_output = intermediate;
if constexpr (GenericActivationTraits<ActivationFunctor<ElementCompute>>::IsArgumentsNeeded) {
output = skip_elementwise_ ? intermediate : activation(intermediate, params_.activation);
} else {
output = skip_elementwise_ ? intermediate : activation(intermediate);
}
}
/// Computes linear scaling:
/// Aux = (alpha * scale_a * scale_b * accumulator) + bias
/// D = activation(Aux)
CUTLASS_DEVICE
void operator()(
FragmentCompute& output,
FragmentCompute& aux_output,
FragmentAccumulator const &accumulator,
FragmentCompute const& bias) {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> multiply;
plus<FragmentCompute> add;
ActivationFunctor<FragmentCompute> activation;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = multiply(params_.activation.alpha, converted_accumulator);
}
intermediate = add(intermediate, bias);
aux_output = intermediate;
if constexpr (GenericActivationTraits<ActivationFunctor<FragmentCompute>>::IsArgumentsNeeded) {
output = skip_elementwise_ ? intermediate : activation(intermediate, params_.activation);
} else {
output = skip_elementwise_ ? intermediate : activation(intermediate);
}
}
CUTLASS_HOST_DEVICE
ElementAbsmax* get_ptr_output_abs_max() const {
return params_.abs_max_D_ptr;
}
CUTLASS_HOST_DEVICE
ElementAbsmax* get_ptr_aux_output_abs_max() const {
return params_.abs_max_aux_ptr;
}
CUTLASS_HOST_DEVICE
ElementCompute get_scale_d() const {
return scale_d_;
}
CUTLASS_HOST_DEVICE
ElementCompute get_scale_aux() const {
return scale_aux_;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h",
"repo_id": "include",
"token_count": 4504
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <type_traits>
#include <utility>
#endif
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
//
// This is used for metaprogramming epilogue functors. If they define
// `static bool const kIsHeavy = true;`, then the epilogue functor itself is
// not inlined. This results in smaller code and is advantageous if the epilogue
// functor consists of many instructions.
//
// If the epilogue functor does not define `kIsHeavy` or if it is `false`, then
// the behavior from CUTLASS 2.5 and before is retained. The epilogue is fully
// unrolled and inlined.
//
template<class>
struct TypeSink { typedef void type; };
template<class T> using TypeSinkT = typename TypeSink<T>::type;
template<class T, class=void> struct IsEpilogueFunctorHeavy {
static bool const value = false;
};
template<class T> struct IsEpilogueFunctorHeavy<T, TypeSinkT< decltype( T::kIsHeavy ) > > {
static bool const value = T::kIsHeavy;
};
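// Illustrative sketch (hypothetical functors): the detection above is SFINAE-based -- the
// partial specialization is viable only when T::kIsHeavy names a constant.
//
//   struct LightOp { /* no kIsHeavy */ };
//   struct HeavyOp { static bool const kIsHeavy = true; };
//   // IsEpilogueFunctorHeavy<LightOp>::value == false  (primary template)
//   // IsEpilogueFunctorHeavy<HeavyOp>::value == true   (specialization reads kIsHeavy)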
////////////////////////////////////////////////////////////////////////////////
/// Base class for epilogues defining warp-level
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpShape_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerIteration = 1
>
class EpilogueBase {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using Padding = Padding_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename AccumulatorTile::Element;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Use this to control the granularity of one epilogue 'iteration'
static int const kFragmentsPerIteration = FragmentsPerIteration;
public:
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<
WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK,
WarpCount::kN * WarpTileIterator::Shape::kColumn
>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
(Shape::kRow + Padding::kRow) * kFragmentsPerIteration,
Shape::kColumn + Padding::kColumn
>;
//
// Data members
//
AlignedBuffer<Element, StorageShape::kCount> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() {
return storage.data();
}
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(
storage.data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
protected:
//
// Data members
//
SharedStorage &shared_storage_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueBase(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
shared_storage_(shared_storage),
warp_tile_iterator_(shared_storage.reference(), lane_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
//
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator_.add_tile_offset(warp_offset);
}
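  // Illustrative worked example (shapes assumed): with WarpCount = <2, 2, 1> and warp_idx = 3,
  //   warp_k  = 3 / (2 * 2) = 0
  //   warp_mn = 3 % (2 * 2) = 3
  //   warp_m  = 3 % 2       = 1
  //   warp_n  = 3 / 2       = 1
  // so this warp writes its accumulators at warp-tile offset
  // {warp_k * WarpCount::kM + warp_m, warp_n} = {1, 1} within the shared-memory tile.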
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_base.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_base.h",
"repo_id": "include",
"token_count": 2588
} | 32 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree compute operations for the CUTLASS 2x epilogue
*/
#pragma once
#include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::threadblock {
using namespace cute;
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// N-nary Elementwise Compute Operation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
template <class> class ComputeFn,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class = void
>
struct VisitorCompute : VisitorImpl2x<> {
using VisitorImpl2x<>::VisitorImpl2x;
struct Callbacks : EmptyCallbacks {
template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize>
CUTLASS_DEVICE Array<ElementOutput, FragmentSize>
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc,
Array<ElementInputs, FragmentSize> const&... frg_inputs) {
return transform_apply(cute::make_tuple(frg_inputs...),
[&] (auto&& frg_input) {
using ElementInput = typename cute::remove_cvref_t<decltype(frg_input)>::Element;
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
ConvertInput convert_input{};
return convert_input(frg_input);
},
[&] (auto&&... cvt_frg_inputs) {
using ComputeOutput = ComputeFn<Array<ElementCompute, FragmentSize>>;
using ConvertOutput = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>;
ComputeOutput compute_output{};
ConvertOutput convert_output{};
return convert_output(compute_output(cvt_frg_inputs...));
}
);
}
};
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
return Callbacks();
}
};
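// Illustrative sketch (tree composition is an assumption): VisitorCompute is the interior
// compute node of a 2.x visitor tree. Each child fragment is converted to ElementCompute,
// ComputeFn is applied element-wise across the converted fragments, and the result is
// converted to ElementOutput, e.g.:
//
//   using Mul = VisitorCompute<cutlass::multiplies, float, float,
//                              FloatRoundStyle::round_to_nearest>;
//   // visit(...) on child fragments a, b yields convert<float>(multiplies(convert(a), convert(b)))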
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::threadblock
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp/0 | {
"file_path": "include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp",
"repo_id": "include",
"token_count": 1255
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from shared memory in epilogue.
///
/// Satisfies: ReadableTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
int MaxAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8
>
class SharedLoadIterator {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::TileShape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kMinAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8;
static int const kAlignment = (MaxAlignment < kMinAlignment ? MaxAlignment : kMinAlignment);
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
ThreadMap::kElementsPerAccess,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess),
const_min(16, kAlignment)
>;
static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements;
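  // Illustrative worked example (element type assumed): for a 16-bit Element with
  // ThreadMap::kElementsPerAccess = 8, AccessType is one 16-byte access, LoadType holds
  // min(128 / 16, 8) = 8 elements, and kLoadsPerAccess = 8 / 8 = 1. A 32-bit Element with
  // the same access width gives a 4-element LoadType and kLoadsPerAccess = 2.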
private:
//
// Data members
//
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Stride along adjacent rows
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIterator(
TensorRef ref,
int thread_idx
):
byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())),
stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointer
byte_pointer_ +=
thread_offset.row() * stride_ +
thread_offset.column() * sizeof(AccessType) / kElementsPerAccess;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
byte_pointer_ +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
uint8_t const *byte_pointer = byte_pointer_ +
row * ThreadMap::Delta::kRow * stride_ +
group * ThreadMap::Delta::kGroup * stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset * sizeof_bits<Element>::value / 8;
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
frag_ptr[frag_idx * kLoadsPerAccess + v] =
memory_pointer[(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * kLoadsPerAccess + v];
}
}
}
}
}
}
/// Sets the shared memory base address (no-op for this iterator)
CUTLASS_DEVICE
void set_smem_base_address(Index address) {
}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
};
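//
// Usage sketch (illustrative only; `EpilogueThreadMap`, `smem_buffer`, `ldm`, and `offset`
// are assumptions, not names defined in this header). Inside a device-side epilogue, a
// SharedLoadIterator reads back accumulator fragments previously staged in shared memory:
//
//   using Iterator = SharedLoadIterator<EpilogueThreadMap, float>;
//
//   typename Iterator::TensorRef ref(smem_buffer, Iterator::Layout(ldm));
//   Iterator smem_iterator(ref, threadIdx.x);
//
//   typename Iterator::Fragment frag;
//   smem_iterator.load(frag);                  // load one fragment from shared memory
//   smem_iterator.add_pointer_offset(offset);  // optionally advance by `offset` elements
//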
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/shared_load_iterator.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/shared_load_iterator.h",
"repo_id": "include",
"token_count": 2462
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue.
These quantities assume a 'column-major' arrangement of TensorOp instructions, of which
a row-oriented slice is visible per iteration.
*/
#pragma once
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Policy details related to the epilogue
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: GemmShape)
typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape)
typename ElementC, ///< Accumulator data type
typename Layout ///< target shared memory layout
>
struct VoltaTensorOpPolicy;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: GemmShape)
>
struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> {
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using ElementC = half_t;
using Layout = layout::RowMajor;
/// Shape of one warp-level instruction
using InstructionShape = gemm::GemmShape<16, 16, 4>;
/// Number of mma operations performed for one 32x32x4 interleaved tile
using MmaIterations = MatrixShape<
InterleavedTileShape::kM / InstructionShape::kM,
InterleavedTileShape::kN / InstructionShape::kN
>;
/// Number of 32x32x4 interleaved tiles needed to cover the warp-level GEMM shape
using TileIterations = MatrixShape<
WarpShape::kM / InterleavedTileShape::kM,
WarpShape::kN / InterleavedTileShape::kN
>;
/// Number of accumulator elements owned by each thread per Mma
static int const kElementsPerMma = 8;
static int const kRowsPerIteration = 16;
//
// Hard-coded constants regarding Tensor Operations
//
/// Number of accumulator elements stored per memory instruction to shared memory
static int const kElementsPerAccess = 4;
/// Number of accesses performed per interleaved tile
static int const kAccessesPerInterleavedTile = 4;
/// Total number of iterations needed to cover the entire tile
static int const kIterations = TileIterations::kRow * 2;
//
// Derived types
//
/// Array type for aligned memory accesses
using AccessType = AlignedArray<ElementC, kElementsPerAccess>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
ElementC,
kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
ElementC,
TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: GemmShape)
>
struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> {
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using ElementC = float;
using Layout = layout::RowMajor;
/// Shape of one warp-level instruction
using InstructionShape = gemm::GemmShape<16, 16, 4>;
/// Number of mma operations performed for one 32x32x4 interleaved tile
using MmaIterations = MatrixShape<
InterleavedTileShape::kM / InstructionShape::kM,
InterleavedTileShape::kN / InstructionShape::kN
>;
/// Number of 32x32x4 interleaved tiles needed to cover the warp-level GEMM shape
using TileIterations = MatrixShape<
WarpShape::kM / InterleavedTileShape::kM,
WarpShape::kN / InterleavedTileShape::kN
>;
/// Number of accumulator elements owned by each thread per Mma
static int const kElementsPerMma = 8;
static int const kRowsPerIteration = 16;
//
// Hard-coded constants regarding Tensor Operations
//
/// Number of accumulator elements stored per memory instruction to shared memory
static int const kElementsPerAccess = 2;
/// Number of accesses performed per interleaved tile
static int const kAccessesPerInterleavedTile = 8;
/// Number of rows per interleaved tile
static int const kRowsPerMmaTile = 2;
/// Total number of iterations needed to cover the entire tile
static int const kIterations = TileIterations::kRow * MmaIterations::kRow;
//
// Derived types
//
/// Array type for aligned memory accesses
using AccessType = AlignedArray<ElementC, kElementsPerAccess>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
ElementC,
kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
ElementC,
TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>;
};
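//
// Worked example (illustrative only): for WarpShape = gemm::GemmShape<64, 64, 4>, the half_t
// specialization above yields
//   MmaIterations   = MatrixShape<32/16, 32/16> = 2 x 2
//   TileIterations  = MatrixShape<64/32, 64/32> = 2 x 2
//   kIterations     = TileIterations::kRow * 2 = 4
//   Fragment        = Array<half_t, 4 * 4 * 2>   (32 elements per iterator access)
//   AccumulatorTile = Array<half_t, 4 * 4 * 8>   (128 accumulator elements per thread)
// The float specialization with the same WarpShape gives kIterations = 2 * 2 = 4 and a
// Fragment of 2 * 8 * 2 = 32 float elements.
//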
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/volta_tensor_op_policy.h/0 | {
"file_path": "include/cutlass/epilogue/warp/volta_tensor_op_policy.h",
"repo_id": "include",
"token_count": 2098
} | 35 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/transform/collective/sm90_wgmma_transpose.hpp"
#include "cutlass/trace.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
// WarpSpecialized Mainloop that sources the A operand from registers
template <
int Stages,
class ClusterShape,
class KernelSchedule,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90TmaGmmaRmemAWarpSpecialized<Stages, ClusterShape, KernelSchedule>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90TmaGmmaRmemAWarpSpecialized<Stages, ClusterShape, KernelSchedule>;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
// Swap and transpose A/B for A k-major layout and B mn-major layout since WGMMA is k-major only (e.g. tf32, Fp32, Int8, Fp8 WGMMA)
static constexpr bool IsLayoutAkBmn =
cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::RowMajor> &&
cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>;
static constexpr bool IsInputSizeTwoBytes = sizeof(ElementA) == 2 && sizeof(ElementB) == 2;
static constexpr bool SwapAB = !IsInputSizeTwoBytes && IsLayoutAkBmn;
using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>;
using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>;
using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>;
using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>;
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>;
using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>;
using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>;
using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
using MainloopPipeline = cutlass::PipelineTmaAsync<DispatchPolicy::Stages>;
using PipelineState = cutlass::PipelineState<DispatchPolicy::Stages>;
using PipelineParams = typename MainloopPipeline::Params;
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
// Tile along modes in a way that maximizes the TMA box size.
using SmemLayoutA = decltype(tile_to_shape(
InternalSmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
using SmemLayoutB = decltype(tile_to_shape(
InternalSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
// If A is mn-major and B is mn-major, transpose the B matrix since WGMMA is k-major only (e.g. tf32, fp32, fp8, int8).
static constexpr bool IsLayoutAmnBmn =
cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::ColumnMajor> &&
cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>;
static constexpr bool TransposeB = !IsInputSizeTwoBytes && IsLayoutAmnBmn;
using TransposeOperandB = decltype(cutlass::transform::collective::detail::make_transpose_operand_b(
0, 0, TiledMma{}, SmemLayoutB{}, InternalSmemLayoutAtomB{},
InternalElementB{}, cute::bool_constant<TransposeB>{}));
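// Worked example of the operand routing above (illustrative only, derived from the
// definitions in this file): for an FP8 GEMM (1-byte operands), IsInputSizeTwoBytes is
// false. If A is k-major (RowMajor tag) and B is mn-major (RowMajor tag), IsLayoutAkBmn is
// true, so SwapAB is true and the Internal* element, stride, layout-atom, and copy-atom
// aliases exchange the A and B roles. If instead A is mn-major (ColumnMajor tag) and B is
// mn-major, IsLayoutAmnBmn is true and TransposeB is true, so B is transposed in shared
// memory before the k-major WGMMA consumes it. A static_assert further below enforces that
// the two paths are never taken together.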
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source A from rmem and B operand from smem_desc for this mainloop.");
static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>,
"GmemTiledCopy - invalid SM90 TMA copy atom specified.");
using GmmaSmemLayoutAtomB = decltype(transform::collective::detail::gmma_smem_transpose_or_passthrough<
TransposeB, InternalSmemLayoutAtomB, InternalElementB>());
// SmemLayoutB for GMMA is different from SmemLayoutB for TMA if TransposeB
using GmmaSmemLayoutB = decltype(tile_to_shape(
GmmaSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}),
cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{}));
static_assert(!SwapAB || !TransposeB, "Cannot SwapAB and TransposeB at the same time.");
static_assert(TransposeB xor (cute::is_same_v<SmemLayoutB, GmmaSmemLayoutB>),
"Should be same layout if not TransposeB.");
static_assert(!TransposeB || (cutlass::bits_to_bytes((size<1>(SmemLayoutB{}) * sizeof_bits<InternalElementB>::value))) == 128,
"SmemLayoutB K must be 128bytes to be transposed.");
static constexpr bool uses_universal_transposition() {
if constexpr (TransposeB) {
return transform::collective::detail::use_universal_transposition<InternalSmemLayoutAtomB, InternalElementB>();
}
else {
return false;
}
}
static_assert(!uses_universal_transposition(),
"Warp specialized ARF kernels have not supported universal B transposition yet.");
static constexpr size_t SmemAlignmentA = cutlass::detail::alignment_for_swizzle(SmemLayoutA{});
static constexpr size_t SmemAlignmentB = cutlass::detail::alignment_for_swizzle(SmemLayoutB{});
static_assert(SmemAlignmentA >= 128 and SmemAlignmentB >= 128, "Require at least 128B alignment");
struct SharedStorage
{
struct TensorStorage : cute::aligned_struct<cute::max(SmemAlignmentA, SmemAlignmentB)> {
cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>, SmemAlignmentA> smem_A;
cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>, SmemAlignmentB> smem_B;
} tensors;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A = nullptr;
StrideA dA{};
ElementB const* ptr_B = nullptr;
StrideB dB{};
uint32_t mma_promotion_interval = 4;
};
// Device side kernel params
struct Params {
// Assumption: StrideA is congruent with Problem_MK
using TMA_A = decltype(make_tma_copy(
GmemTiledCopyA{},
make_tensor(static_cast<InternalElementA const*>(nullptr), repeat_like(InternalStrideA{}, int32_t(0)), InternalStrideA{}),
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any
// Assumption: StrideB is congruent with Problem_NK
using TMA_B = decltype(make_tma_copy(
GmemTiledCopyB{},
make_tensor(static_cast<InternalElementB const*>(nullptr), repeat_like(InternalStrideB{}, int32_t(0)), InternalStrideB{}),
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any
TMA_A tma_load_a;
TMA_B tma_load_b;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
(void) workspace;
// Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
if constexpr (SwapAB) {
M = get<1>(problem_shape_MNKL);
N = get<0>(problem_shape_MNKL);
}
InternalElementA const* ptr_A;
InternalStrideA dA;
InternalElementB const* ptr_B;
InternalStrideB dB;
if constexpr (not SwapAB) {
ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A);
ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B);
dA = args.dA;
dB = args.dB;
}
else {
ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_B);
ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_A);
dA = args.dB;
dB = args.dA;
}
Tensor tensor_a = make_tensor(ptr_A, make_layout(make_shape(M,K,L), dA));
Tensor tensor_b = make_tensor(ptr_B, make_layout(make_shape(N,K,L), dB));
typename Params::TMA_A tma_load_a = make_tma_copy(
GmemTiledCopyA{},
tensor_a,
SmemLayoutA{}(_,_,cute::Int<0>{}),
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})),
size<1>(ClusterShape{})); // mcast along N mode for this M load, if any
typename Params::TMA_B tma_load_b = make_tma_copy(
GmemTiledCopyB{},
tensor_b,
SmemLayoutB{}(_,_,cute::Int<0>{}),
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})),
size<0>(ClusterShape{})); // mcast along M mode for this N load, if any
return {
tma_load_a,
tma_load_b
};
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{});
constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{});
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
static constexpr uint32_t TmaTransactionBytes =
cutlass::bits_to_bytes(size<0>(SmemLayoutA{}) * size<1>(SmemLayoutA{}) * static_cast<uint32_t>(sizeof_bits<InternalElementA>::value)) +
cutlass::bits_to_bytes(size<0>(SmemLayoutB{}) * size<1>(SmemLayoutB{}) * static_cast<uint32_t>(sizeof_bits<InternalElementB>::value)) ;
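// For example (illustrative values only): with a 128x128x64 CTA tile and 16-bit A/B
// operands, one pipeline stage transfers 128 * 64 * 2 bytes of A plus 128 * 64 * 2 bytes of
// B, so TmaTransactionBytes = 32768 bytes per stage.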
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void prefetch_tma_descriptors(Params const& mainloop_params) {
cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor());
cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor());
}
/// Set up the data needed by this collective for load and mma.
/// Returns a tuple of tensors. The collective and the kernel layer have the contract that the
/// returned tuple must contain at least two elements, with the first two elements being:
/// gA_mkl - The tma tensor, A after a local tile so it has shape (BLK_M,BLK_K,m,k,l)
/// gB_nkl - The tma tensor, B after a local tile so it has shape (BLK_N,BLK_K,n,k,l)
/// The rest of the tensors can be specified as needed by this collective.
template <class ProblemShape_MNKL>
CUTLASS_DEVICE auto
load_init(ProblemShape_MNKL const& problem_shape_MNKL, Params const& mainloop_params) const {
using X = Underscore;
// Separate out problem shape for convenience
auto [M,N,K,L] = problem_shape_MNKL;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA_mkl = mainloop_params.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l)
Tensor mB_nkl = mainloop_params.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l)
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, TileShape{}, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, TileShape{}, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
return cute::make_tuple(gA_mkl, gB_nkl);
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
template <
class TensorA, class TensorB,
class KTileIterator, class BlockCoord
>
CUTLASS_DEVICE void
load(
Params const& mainloop_params,
MainloopPipeline pipeline,
PipelineState smem_pipe_write,
cute::tuple<TensorA, TensorB> const& load_inputs,
BlockCoord const& blk_coord,
KTileIterator k_tile_iter, int k_tile_count,
int thread_idx,
uint32_t block_rank_in_cluster,
TensorStorage& shared_tensors) {
int lane_predicate = cute::elect_one_sync();
if (lane_predicate) {
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE)
//
// Prepare the TMA loads for A and B
//
constexpr uint32_t cluster_shape_x = get<0>(ClusterShape());
uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x};
Tensor gA_mkl = get<0>(load_inputs);
Tensor gB_nkl = get<1>(load_inputs);
auto block_tma_a = mainloop_params.tma_load_a.get_slice(cluster_local_block_id.y);
auto block_tma_b = mainloop_params.tma_load_b.get_slice(cluster_local_block_id.x);
// Partition the inputs based on the current block coordinates.
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord;
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Applies the mapping from block_tma_a
Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k)
Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE)
Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k)
Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE)
uint16_t mcast_mask_a = 0;
uint16_t mcast_mask_b = 0;
// Issue TmaLoads
// Maps the tile -> block, value
if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int n = 0; n < size<1>(block_layout); ++n) {
mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{}));
}
}
if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) {
auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id
for (int m = 0; m < size<0>(block_layout); ++m) {
mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{}));
}
}
// Mainloop
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
//
// Copy gmem to smem for *k_tile_iter
//
using BarrierType = typename MainloopPipeline::ProducerBarrierType;
BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write);
int write_stage = smem_pipe_write.index();
copy(mainloop_params.tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
copy(mainloop_params.tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
++k_tile_iter;
// Advance smem_pipe_write
++smem_pipe_write;
}
}
}
/// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster
CUTLASS_DEVICE void
load_tail(MainloopPipeline pipeline, PipelineState smem_pipe_write) {
int lane_predicate = cute::elect_one_sync();
// Issue the epilogue waits
if (lane_predicate) {
/* This helps avoid early exit of blocks in Cluster
* Waits for all stages to either be released (all
* Consumer UNLOCKs), or if the stage was never used
* then would just be acquired since the phase was
* still inverted from make_producer_start_state
*/
pipeline.producer_tail(smem_pipe_write);
}
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
template <
class FrgTensorC
>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
PipelineState smem_pipe_read,
FrgTensorC& accum,
int k_tile_count,
int thread_idx,
TensorStorage& shared_tensors,
Params const& mainloop_params) {
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be rank 2.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2.");
static_assert(!cute::is_void_v<InternalSmemCopyAtomA>,
"SM90 GMMA mainloops must specify a non-void copy atom for smem sourced instructions.");
static_assert(cute::is_void_v<InternalSmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
// Obtain warp index
int warp_idx = canonical_warp_idx_sync();
[[maybe_unused]] int warp_group_thread_idx = thread_idx % 128;
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE)
// If TransposeB, GMMA will read from transposed B layout SMEM
Tensor gmma_sB_position_dependent = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()),
GmmaSmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
Tensor gmma_sB = as_position_independent_swizzle_tensor(gmma_sB_position_dependent); // (BLK_N,BLK_K,PIPE)
//
// Define C accumulators and A/B partitioning
//
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
// Allocate fragments and descriptors
Tensor tCsA = thread_mma.partition_A(sA);
Tensor tCrA = thread_mma.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K)
Tensor tCsB = thread_mma.partition_B(gmma_sB_position_dependent); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
//
// Copy Atom A retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
Tensor tCsA_copy_view = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tCsA_copy_view) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA_copy_view) == size<2>(tCrA_copy_view)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
CUTE_STATIC_ASSERT_V(size<2>(tCrA) > _2{}, "RS loops require more than 2 MMA k-iterations for correctness.");
//
// PIPELINED MAIN LOOP
//
// We release buffers to producer warps (dma load) with some mmas in flight
PipelineState smem_pipe_release = smem_pipe_read;
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
TransposeOperandB transpose = cutlass::transform::collective::detail::make_transpose_operand_b(
warp_idx, warp_group_thread_idx, tiled_mma, SmemLayoutB{},
InternalSmemLayoutAtomB{}, InternalElementB{},
cute::bool_constant<TransposeB>{});
warpgroup_fence_operand(accum);
ConsumerToken barrier_token = {BarrierStatus::WaitAgain};
// first k tile
{
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
pipeline.consumer_wait(smem_pipe_read, barrier_token);
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
// copy smem->rmem for A operand
copy(smem_tiled_copy_A, tCsA_copy_view(_,_,0,read_stage), tCrA_copy_view(_,_,0));
// transpose B operand in SMEM
transpose(sB, gmma_sB, read_stage, 0);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) {
copy(smem_tiled_copy_A, tCsA_copy_view(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
transpose.synchronize(k_block);
transpose(sB, gmma_sB, read_stage, k_block + 1);
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
if(k_block == 0) {
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
}
warpgroup_wait<2>();
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum);
warpgroup_commit_batch();
--k_tile_count;
if(k_tile_count == 0) {
return;
}
pipeline.consumer_wait(smem_pipe_read, barrier_token);
copy(smem_tiled_copy_A, tCsA_copy_view(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0));
transpose(sB, gmma_sB, smem_pipe_read.index(), 0);
warpgroup_wait<2>();
}
warpgroup_fence_operand(accum);
// Mainloop GMMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 1; --k_tile_count) {
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
if (k_block == 0) {
barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
}
if (k_block == size<2>(tCrA) - 1) {
pipeline.consumer_wait(smem_pipe_read, barrier_token);
copy(smem_tiled_copy_A, tCsA_copy_view(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0));
// transpose B operand in SMEM
transpose(sB, gmma_sB, smem_pipe_read.index(), 0);
}
else {
copy(smem_tiled_copy_A, tCsA_copy_view(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
// transpose B operand in SMEM
transpose.synchronize(k_block); // make transpose of k_block available
transpose(sB, gmma_sB, read_stage, k_block + 1);
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
warpgroup_commit_batch();
warpgroup_wait<2>();
if (k_block == 1) {
// release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
warpgroup_fence_operand(accum);
}
warpgroup_fence_operand(accum);
{
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) {
copy(smem_tiled_copy_A, tCsA_copy_view(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
transpose.synchronize(k_block); // make k_block transpose available
transpose(sB, gmma_sB, read_stage, k_block + 1);
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<2>();
if (k_block == 1) {
// release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum);
warpgroup_commit_batch();
}
warpgroup_fence_operand(accum);
}
/// Perform a Consumer Epilogue to release all buffers
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
// Prologue GMMAs
int prologue_mma_count = 1;
k_tile_count -= prologue_mma_count;
smem_pipe_release.advance(k_tile_count);
// Wait on all GMMAs to complete
warpgroup_wait<0>();
for (int count = 0; count < prologue_mma_count; ++count) {
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
};
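//
// Call-sequence sketch (illustrative only; the kernel layer owns the actual orchestration
// and the variable names below are assumptions):
//
//   // Producer (DMA) warp:
//   auto load_inputs = collective.load_init(problem_shape_MNKL, mainloop_params);
//   collective.load(mainloop_params, pipeline, smem_pipe_write, load_inputs, blk_coord,
//                   k_tile_iter, k_tile_count, thread_idx, block_rank_in_cluster,
//                   shared_storage.tensors);
//   collective.load_tail(pipeline, smem_pipe_write);
//
//   // Consumer (math) warp group:
//   collective.mma(pipeline, smem_pipe_read, accumulators, k_tile_count, thread_idx,
//                  shared_storage.tensors, mainloop_params);
//   collective.mma_tail(pipeline, smem_pipe_release, k_tile_count);
//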
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized.hpp",
"repo_id": "include",
"token_count": 14823
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Sparse GEMM with visitor
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename FusionCallbacks_ =
typename cutlass::epilogue::threadblock::detail::EmptyCallbacks,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Number of stages used in the pipelined epilogue
int EpilogueStages = 1>
class SparseGemmWithVisitor {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using FusionCallbacks = FusionCallbacks_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemmWithVisitor<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
FusionCallbacks,
ThreadblockSwizzle,
kStages,
Operator,
EpilogueStages
>::GemmKernel;
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementE const, LayoutE> ref_E;
typename FusionCallbacks::Arguments epilogue;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementE, LayoutE> ref_E_,
typename FusionCallbacks::Arguments epilogue_ =
typename FusionCallbacks::Arguments()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_E(ref_E_),
epilogue(epilogue_) {
}
};
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemmWithVisitor() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
cutlass::TensorRef<ElementC, LayoutC>(), // It only matters that it's empty.
cutlass::TensorRef<ElementC, LayoutC>(), // Same as above.
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
constexpr int SplitKSlices = 1;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
SplitKSlices);
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_E.non_const_ref(),
args.epilogue
};
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
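//
// Usage sketch (illustrative only; `problem_size`, `ref_A`, `ref_B`, `ref_E`,
// `epilogue_args`, and `stream` are assumptions supplied by the caller):
//
//   using Gemm = cutlass::gemm::device::SparseGemmWithVisitor<...>;  // template args elided
//
//   Gemm gemm_op;
//   typename Gemm::Arguments args(problem_size, ref_A, ref_B, ref_E, epilogue_args);
//
//   if (Gemm::can_implement(args) == cutlass::Status::kSuccess) {
//     cutlass::Status status = gemm_op(args, /*workspace=*/nullptr, stream);
//     // or: gemm_op.initialize(args); gemm_op.run(stream);
//   }
//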
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_sparse_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_sparse_with_visitor.h",
"repo_id": "include",
"token_count": 3980
} | 37 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default configuration for a GEMM with fused epilogue visitor callbacks
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/kernel/gemm_universal_with_visitor.h"
#include "cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Access granularity of C matrix in unit of elements
int kAlignmentC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Element type for epilogue computation
typename ElementEpilogue,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename FusionCallbacks,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of stages used in the pipelined epilogue
int EpilogueStages = 1
>
struct DefaultGemmWithVisitor {
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
epilogue::thread::LinearCombination<
ElementC_, kAlignmentC,
ElementAccumulator, ElementEpilogue
>,
ThreadblockSwizzle,
Stages,
Operator
>::GemmKernel;
// Define epilogue
using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks<
typename GemmBase::Epilogue,
FusionCallbacks,
EpilogueStages
>;
/// GemmWithVisitor without StreamkFeature member type
template <class SwizzleT, class Enable = void>
class SelectBase :
public GemmWithEpilogueVisitor<
typename GemmBase::Mma,
Epilogue,
SwizzleT>
{};
/// GemmWithVisitor with StreamkFeature member type
template <class SwizzleT>
class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
public GemmWithEpilogueVisitorStreamk<
typename GemmBase::Mma,
Epilogue,
SwizzleT>
{};
/// Select kernel by ThreadblockSwizzle's support for StreamkFeature
using GemmKernel = SelectBase<ThreadblockSwizzle>;
};
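//
// Selection sketch (illustrative): if ThreadblockSwizzle exposes a StreamkFeature member
// type, the partial specialization of SelectBase picks GemmWithEpilogueVisitorStreamk;
// otherwise the data-parallel GemmWithEpilogueVisitor is used. The chosen kernel is then
// retrieved as, e.g.,
//
//   using Kernel = typename DefaultGemmWithVisitor<...>::GemmKernel;  // template args elided
//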
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h",
"repo_id": "include",
"token_count": 1622
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Stream-K Gemm kernel compatible with fused epilogues
that broadcast a bias vector over the MMA output.
*/
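//
// Informal sketch of the computation this kernel feeds (hedged: the exact functional form is
// determined by the Epilogue and its OutputOp template arguments, not by this file). Per
// output tile, the fused epilogue typically produces something of the shape
//
//   D[i][j] = op( alpha * sum_k A[i][k] * B[k][j] + beta * C[i][j] + Vector[j] )
//
// where Vector is the bias-like vector named by ptr_Vector below, indexed by output column
// (and advanced by ldr per threadblock row-panel), and an optional auxiliary tensor named by
// ptr_Tensor may be written alongside D.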
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/layout.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/barrier.h"
#include "cutlass/block_striped.h"
#include "cutlass/semaphore.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool IsSingleSource = Epilogue_::kIsSingleSource
>
struct GemmStreamkWithFusedEpilogue;
// GemmStreamkWithFusedEpilogue with two sources
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmStreamkWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, false> {
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename Mma::FragmentC;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Workspace bytes per thread block
static size_t const kWorkspaceBytesPerBlock =
__NV_STD_MAX(
kThreadCount * sizeof(AccumulatorTile),
Epilogue::kWorkspaceBytesPerBlock);
/// Block-striped reduction utility
using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode{GemmUniversalMode::kGemm};
GemmCoord problem_size{};
int batch_count{1}; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A{nullptr};
void const * ptr_B{nullptr};
void const * ptr_C1{nullptr};
void const * ptr_C2{nullptr};
void * ptr_D{nullptr};
void * ptr_Vector{nullptr};
void * ptr_Tensor{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C1{0};
int64_t batch_stride_C2{0};
int64_t batch_stride_D{0};
int64_t batch_stride_Vector{0};
int64_t batch_stride_Tensor{0};
typename LayoutA::Stride::Index lda{};
typename LayoutB::Stride::Index ldb{};
typename LayoutC::Stride::Index ldc1{};
typename LayoutC::Stride::Index ldc2{};
typename LayoutC::Stride::Index ldd{};
typename LayoutC::Stride::Index ldr{};
typename LayoutC::Stride::Index ldt{};
int avail_sms{-1}; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
//
// Methods
//
/// Default Constructor
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C1,
void const * ptr_C2,
void * ptr_D,
void * ptr_Vector,
void * ptr_Tensor,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C1,
int64_t batch_stride_C2,
int64_t batch_stride_D,
int64_t batch_stride_Vector,
int64_t batch_stride_Tensor,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc1,
typename LayoutC::Stride::Index ldc2,
typename LayoutC::Stride::Index ldd,
typename LayoutC::Stride::Index ldr,
typename LayoutC::Stride::Index ldt,
int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
:
mode(mode),
problem_size(problem_size),
batch_count(batch_split),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C1(batch_stride_C1),
batch_stride_C2(batch_stride_C2),
batch_stride_D(batch_stride_D),
batch_stride_Vector(batch_stride_Vector),
batch_stride_Tensor(batch_stride_Tensor),
lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms)
{
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
/// Parameters structure
struct Params
{
public:
//
// Data members
//
void * ptr_A{nullptr};
void * ptr_B{nullptr};
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorB::Params params_B{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
GemmUniversalMode mode{GemmUniversalMode::kGemm};
ThreadblockSwizzle block_mapping{};
void *barrier_workspace{nullptr};
void *partials_workspace{nullptr};
typename EpilogueOutputOp::Params output_op{};
void * ptr_C1{nullptr};
void * ptr_C2{nullptr};
void * ptr_D{nullptr};
void * ptr_Tensor{nullptr};
void * ptr_Vector{nullptr};
typename Epilogue::OutputTileIterator::Params params_C1{};
typename Epilogue::OutputTileIterator::Params params_C2{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename Epilogue::TensorTileIterator::Params params_Tensor{};
int64_t batch_stride_C1{0};
int64_t batch_stride_C2{0};
int64_t batch_stride_D{0};
int64_t batch_stride_Vector{0};
int64_t batch_stride_Tensor{0};
typename LayoutC::Stride::Index ldr{};
protected:
//
// Host-only dispatch-utilities
//
/// Pad the given allocation size up to the nearest cache line
static size_t cacheline_align_up(size_t size)
{
static const int CACHELINE_SIZE = 128;
return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}
/// Get the workspace size needed for barrier
size_t get_barrier_workspace_size() const
{
// For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
// each reduction block needs its own synchronization flag.
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
}
/// Get the workspace size needed for intermediate partial sums
size_t get_partials_workspace_size() const
{
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
}
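// Worked sizing example (illustrative numbers only): with 5 SK blocks, 1 reduction block and a
// 4-byte Barrier::T, the barrier workspace needs max(5, 1) * 4 = 20 bytes, which
// cacheline_align_up() pads to 128. If kWorkspaceBytesPerBlock were 16 KiB, the partials
// workspace would be 5 * 16 KiB = 80 KiB, already a multiple of the 128-byte line.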
public:
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
params_A(args.lda),
params_B(args.ldb),
params_C1(args.ldc1),
params_C2(args.ldc2),
params_D(args.ldd),
params_Tensor(args.ldt),
output_op(args.epilogue),
mode(args.mode),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C1(const_cast<void *>(args.ptr_C1)),
ptr_C2(const_cast<void *>(args.ptr_C2)),
ptr_D(args.ptr_D),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C1(args.batch_stride_C1),
batch_stride_C2(args.batch_stride_C2),
batch_stride_D(args.batch_stride_D),
batch_stride_Vector(args.batch_stride_Vector),
batch_stride_Tensor(args.batch_stride_Tensor),
barrier_workspace(nullptr),
partials_workspace(nullptr)
{
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
// Number of SMs to make available for StreamK decomposition
int avail_sms = (args.avail_sms == -1) ?
device_sms :
fast_min(args.avail_sms, device_sms);
CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms);
// Initialize the block mapping structure
block_mapping = ThreadblockSwizzle(
args.mode,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count,
sm_occupancy,
device_sms,
avail_sms,
sizeof(ElementA),
sizeof(ElementB),
sizeof(ElementC),
Epilogue::kAccumulatorFragments);
}
/// Returns the workspace size (in bytes) needed for these parameters
size_t get_workspace_size() const
{
return
get_barrier_workspace_size() +
get_partials_workspace_size();
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
uint8_t *ptr = static_cast<uint8_t*>(workspace);
// Establish partials workspace
partials_workspace = nullptr;
size_t partials_workspace_bytes = get_partials_workspace_size();
if (partials_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
partials_workspace = ptr;
ptr += partials_workspace_bytes;
}
// Establish barrier workspace
barrier_workspace = nullptr;
size_t barrier_workspace_bytes = get_barrier_workspace_size();
if (barrier_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
barrier_workspace = ptr;
ptr += barrier_workspace_bytes;
}
// Zero-initialize barrier workspace
if (barrier_workspace)
{
size_t barrier_workspace_bytes = get_barrier_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
cudaError_t result = cudaMemsetAsync(
barrier_workspace,
0,
barrier_workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
cutlass::gemm::GemmCoord get_tiled_shape() const
{
return block_mapping.tiled_shape();
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return block_mapping.get_grid_dims();
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
CUTLASS_HOST_DEVICE
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C1 = const_cast<void *>(args.ptr_C1);
ptr_C2 = const_cast<void *>(args.ptr_C2);
ptr_D = args.ptr_D;
ptr_Vector = args.ptr_Vector;
ldr = args.ldr;
ptr_Tensor = args.ptr_Tensor;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C1 = args.batch_stride_C1;
batch_stride_C2 = args.batch_stride_C2;
batch_stride_D = args.batch_stride_D;
batch_stride_Vector = args.batch_stride_Vector;
batch_stride_Tensor = args.batch_stride_Tensor;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
}
};
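// Host-side dispatch sketch (illustrative only; production code normally reaches this kernel
// through the CUTLASS device-layer adapters). Names such as kernel_entry, args, device_sms,
// sm_occupancy and stream are placeholders supplied by the caller; the member functions used
// below are the ones defined by Params above.
//
//   using GemmKernel = cutlass::gemm::kernel::GemmStreamkWithFusedEpilogue<Mma, Epilogue, Swizzle>;
//
//   typename GemmKernel::Params params(args, device_sms, sm_occupancy);
//
//   void *workspace = nullptr;
//   cudaMalloc(&workspace, params.get_workspace_size());    // partials + barrier scratch
//   params.init_workspace(workspace, stream);                // zero-fills the barrier region
//
//   dim3 grid = params.get_grid_dims();
//   dim3 block(GemmKernel::kThreadCount, 1, 1);
//   size_t smem = sizeof(typename GemmKernel::SharedStorage);
//   kernel_entry<GemmKernel><<<grid, block, smem, stream>>>(params);  // hypothetical __global__ wrapper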
/// Tile work descriptor
struct TileWorkDesc
{
/// The linear tile index
int tile_idx;
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
cutlass::gemm::GemmCoord tiled_coord;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
int iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_begin;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_end;
/// The number of remaining MAC-iterations this threadblock will perform for this tile
int k_iters_remaining;
// Whether this block will perform the first iteration of this tile
CUTLASS_DEVICE
bool tile_started()
{
return (k_begin == 0);
}
// Whether this block will perform the last iteration of this tile
CUTLASS_DEVICE
bool tile_finished(Params const ¶ms)
{
return (k_end == params.block_mapping.problem_size.k());
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
protected:
//
// Data members
//
/// GEMM problem parameters
Params const ¶ms;
/// Shared storage reference
SharedStorage &shared_storage;
/// ID within the threadblock
int thread_idx;
/// ID of warp
int warp_idx;
/// ID of each thread within a warp
int lane_idx;
/// Threadblock scoped epilogue
Epilogue epilogue;
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
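// Example of the alignment rule above (hedged: the actual access widths come from the
// configured iterators): if A is row-major half_t read with 128-bit accesses, kAlignmentA == 8
// and can_implement() rejects any problem whose K extent is not a multiple of 8, e.g.
// GemmCoord{1024, 1024, 1020} fails while GemmCoord{1024, 1024, 1024} passes.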
protected:
//
// Device-only utility methods
//
/// Iterator for fetching tile fragments from A
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input A matrix
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
}
if (mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
}
int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
int m_end = params.block_mapping.problem_size.m();
return Mma::IteratorA(
params.params_A,
ptr_A,
{ m_end, tile_work.k_end },
threadIdx.x,
{ m_begin, tile_work.k_begin });
}
/// Iterator for fetching tile fragments from B
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input B matrix
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
}
if (mode == GemmUniversalMode::kArray) {
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
}
int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
int n_end = params.block_mapping.problem_size.n();
return Mma::IteratorB(
params.params_B,
ptr_B,
{ tile_work.k_end, n_end },
threadIdx.x,
{ tile_work.k_begin, n_begin });
}
CUTLASS_DEVICE
void init_dp_tile_work(
TileWorkDesc &tile_work,
int tile_idx)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = 0;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = params.block_mapping.problem_size.k();
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
CUTLASS_DEVICE
void init_sk_tile_work(
TileWorkDesc &tile_work,
int tile_idx,
int block_iter_begin,
int block_iter_end)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration for this tile
int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
// The first tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
// The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_end = block_iter_end - tile_iter_begin;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = min(
params.block_mapping.problem_size.k(), // extent of k domain
(k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
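// Worked example (illustrative numbers): suppose iters_per_tile() == 8, Mma::Shape::kK == 32
// and problem_size.k() == 256. For tile_idx == 3, tile_iter_begin == 24. An SK block owning
// global iterations [20, 28) gets iter_begin = max(20, 24) = 24, k_iter_begin = 0,
// k_iter_end = 4, hence k_iters_remaining = 4 and the k-range [0, 128) of this tile. A peer
// owning iterations [28, 32) covers the remaining k-range [128, 256); the two partial
// accumulators are later combined via share_accumulators()/acquire_accumulators() or
// separate_reduction().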
/// Share accumulators with peers
CUTLASS_DEVICE
void share_accumulators(
AccumulatorTile const &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
int accum_tile_offset = first_block_idx * kThreadCount;
if (block_idx == first_block_idx)
{
// First peer initializes the workspace partials
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
else
{
// Subsequent peers atomically accumulate into the workspace partials
if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
{
// Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
}
else
{
// Turnstile reduction order: wait until the previous peer has written
int wait_count = block_idx - first_block_idx;
Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
}
// Perform reduction in workspace
BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
// Signal our arrival
Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
/// Acquire accumulators from peers
CUTLASS_DEVICE
void acquire_accumulators(
AccumulatorTile &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
// Wait for arrival
int num_carry_in = block_idx - first_block_idx;
Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
// Load and add peer-partials accumulator tile to local accumulator tile
int accum_tile_offset = first_block_idx * kThreadCount;
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
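// Protocol example (illustrative block indices): suppose SK blocks 5, 6 and 7 collaborate on a
// tile and block 7 owns its final iterations (first_block_idx == 5). Under the turnstile
// (non-atomic) strategy, block 5 stores its partials and arrives (flag[5] -> 1); block 6 waits
// for flag[5] == 1, reduces into the same workspace slot and arrives (flag[5] -> 2); block 7,
// the "finishing" block, calls acquire_accumulators() with num_carry_in == 2, waits for
// flag[5] == 2, resets the flag, and folds the shared partials into its own accumulators
// before running the epilogue.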
/// Perform epilogue computations and output
CUTLASS_DEVICE
void do_epilogue(
TileWorkDesc &tile_work,
AccumulatorTile &accumulator_tile)
{
ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1);
ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C1 += tile_work.tiled_coord.k() * params.batch_stride_C1;
if (ptr_C2) {
ptr_C2 += tile_work.tiled_coord.k() * params.batch_stride_C2;
}
ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
if (ptr_Tensor) {
ptr_Tensor = ReferenceFactory<typename Epilogue::ElementTensor>::add_pointer_offset(
ptr_Tensor,
tile_work.tiled_coord.k() * params.batch_stride_Tensor);
}
if (ptr_Vector) {
ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector;
}
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C1 = static_cast<ElementC * const *>(params.ptr_C1)[tile_work.tiled_coord.k()];
if (ptr_C2) {
ptr_C2 = static_cast<ElementC * const *>(params.ptr_C2)[tile_work.tiled_coord.k()];
}
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
if (ptr_Tensor) {
ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[tile_work.tiled_coord.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[tile_work.tiled_coord.k()];
}
}
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tile_work.tiled_coord.m() * Mma::Shape::kM,
tile_work.tiled_coord.n() * Mma::Shape::kN
);
// Tile iterator loading from residual1.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator loading from residual2.
typename Epilogue::OutputTileIterator iterator_C2(
params.params_C2,
ptr_C2,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
ptr_Tensor,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
EpilogueOutputOp(params.output_op),
ptr_Vector,
iterator_D,
accumulator_tile,
iterator_C1,
iterator_C2,
tensor_iterator,
params.block_mapping.problem_size.mn(),
threadblock_item_begin);
}
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
// Reduce by sk-tile (every tile contributed to by one or more blocks)
reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;
peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
// Wait for peers to complete
int peer_idx_end = peer_idx_last + 1;
int num_peers = peer_idx_end - peer_idx_begin;
Barrier::wait_eq_reset(
params.barrier_workspace,
thread_idx,
(reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
num_peers);
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tiled_coord.m() * Mma::Shape::kM,
tiled_coord.n() * Mma::Shape::kN
);
ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1);
ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Tile iterator loading from residual1.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator loading from residual2.
typename Epilogue::OutputTileIterator iterator_C2(
params.params_C2,
ptr_C2,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
ptr_Tensor,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue.reduce(
peer_idx_begin,
peer_idx_end,
reduce_fragment_idx,
params.partials_workspace,
EpilogueOutputOp(params.output_op),
ptr_Vector,
iterator_D,
iterator_C1,
iterator_C2,
tensor_iterator,
params.block_mapping.problem_size.mn(),
threadblock_item_begin);
}
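// Index-mapping example (illustrative): with Epilogue::kAccumulatorFragments == 4, a reduction
// block handed reduce_idx == 10 serves sk-tile 10 / 4 == 2 and accumulator fragment
// 10 % 4 == 2. It waits on barrier flag (2 * 4) + 2 == 10 until every peer that contributed
// MAC-iterations to that tile has arrived, then reduces their stored partials straight into
// the output through epilogue.reduce().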
CUTLASS_DEVICE
void process_tile(
TileWorkDesc tile_work,
int block_idx,
int dp_start_block_idx,
int block_iter_begin)
{
// Initialize input iterators
typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);
// Initialize accumulators
AccumulatorTile accumulator_tile;
accumulator_tile.clear();
// Initialize MMA abstraction
Mma mma(
shared_storage.main_loop,
thread_idx,
warp_idx,
lane_idx);
// Perform this tile's range of multiply-accumulate (MAC) iterations
mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
(params.block_mapping.reduction_blocks == 0) ||
(block_idx >= dp_start_block_idx))
{
//
// Cooperative SK peer reduction or DP block
//
int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
if (!tile_work.tile_finished(params)) {
// Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
share_accumulators(accumulator_tile, block_idx, first_block_idx);
}
else
{
// DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
if (!tile_work.tile_started())
{
// A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
}
do_epilogue(tile_work, accumulator_tile);
}
}
else
{
//
// Separate peer reduction
//
// Share accumulator partial sums with peer threadblock(s) through scratch workspace
epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
// Signal arrival
Barrier::arrive_range_inc(
params.barrier_workspace,
thread_idx,
tile_work.tile_idx * Epilogue::kAccumulatorFragments,
Epilogue::kAccumulatorFragments);
}
}
/// Executes one GEMM
CUTLASS_DEVICE
void gemm()
{
// Initialize block's iteration range
int tile_idx = 0;
int block_iter_begin = 0;
int block_iters_remaining = 0;
int block_idx = params.block_mapping.get_block_idx();
int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
// Initialize tile work descriptor
TileWorkDesc tile_work;
bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
bool sk_block = (block_idx < sk_padding_start_block_idx);
bool reduce_block = (block_idx >= reduce_start_block_idx) &&
(block_idx < grid_padding_start_block_idx) &&
(ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);
if (dp_block)
{
// This is a DP block
int dp_block_idx = block_idx - dp_start_block_idx;
int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
// Blocks in first DP wave get configured number of tiles
tile_idx = first_dp_tile + dp_block_idx;
int tile_allotment = params.block_mapping.dp_first_wave_tiles;
// Blocks in subsequent DP waves get 1 tile
if (dp_block_idx >= params.block_mapping.avail_sms) {
tile_allotment = 1;
tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
}
block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allotment;
init_dp_tile_work(tile_work, tile_idx);
// DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
if ((tile_idx < params.block_mapping.sk_tiles) ||
(tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
(tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
{
return;
}
}
else if (sk_block)
{
// This is a SK block
int block_iter_end;
params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
block_iters_remaining = block_iter_end - block_iter_begin;
tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
else
{
if (reduce_block)
{
// This is a reduction threadblock
int reduce_block_idx = block_idx - reduce_start_block_idx;
separate_reduction(reduce_block_idx);
}
return;
}
// Iteration-processing loop body
CUTLASS_PRAGMA_NO_UNROLL
while (true)
{
// Perform this block's share of work for this tile
process_tile(
tile_work,
block_idx,
dp_start_block_idx,
block_iter_begin);
block_iters_remaining -= tile_work.k_iters_remaining;
if (block_iters_remaining == 0)
{
break;
}
// Continue to next tile
__syncthreads();
if (block_idx >= dp_start_block_idx)
{
// DP blocks consume their tiles at stride
tile_idx += params.block_mapping.avail_sms;
init_dp_tile_work(tile_work, tile_idx);
}
else
{
// SK blocks consume their tiles in backwards order
tile_idx--;
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
}
}
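// Grid layout implied by the classification above (illustrative): block indices are
// partitioned as
//
//   [0, sk_padding_start)              stream-K blocks (sk_regions() * sk_blocks_per_region())
//   [sk_padding_start, dp_start)       padding that fills out the last SK wave (sk_waves * avail_sms)
//   [dp_start, reduce_start)           data-parallel blocks (dp_blocks)
//   [reduce_start, grid_padding_start) reduction blocks (kMixed strategy only)
//   [grid_padding_start, gridDim.x)    trailing padding, which returns immediately
//
// For example, 7 SK blocks rounded up to one 8-block wave, 12 DP blocks and 2 reduction blocks
// yield the ranges [0, 7), [7, 8), [8, 20) and [20, 22).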
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmStreamkWithFusedEpilogue op(params, shared_storage);
op();
}
// Constructor
CUTLASS_DEVICE
GemmStreamkWithFusedEpilogue(
Params const ¶ms,
SharedStorage &shared_storage)
:
params(params),
shared_storage(shared_storage),
thread_idx(threadIdx.x),
warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code is compiled as warp-uniform
lane_idx(threadIdx.x % 32),
epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx)
{}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()() {
// Generic SK code path
gemm();
}
};
// GemmStreamkWithFusedEpilogue with one source
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmStreamkWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, true> {
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename Mma::FragmentC;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Workspace bytes per thread block
static size_t const kWorkspaceBytesPerBlock =
__NV_STD_MAX(
kThreadCount * sizeof(AccumulatorTile),
Epilogue::kWorkspaceBytesPerBlock);
/// Block-striped reduction utility
using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
//
// Structures
//
/// Argument structure
struct Arguments
{
//
// Data members
//
GemmUniversalMode mode{GemmUniversalMode::kGemm};
GemmCoord problem_size{};
int batch_count{1}; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A{nullptr};
void const * ptr_B{nullptr};
void const * ptr_C{nullptr};
void * ptr_D{nullptr};
void * ptr_Vector{nullptr};
void * ptr_Tensor{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
int64_t batch_stride_Vector{0};
int64_t batch_stride_Tensor{0};
typename LayoutA::Stride::Index lda{};
typename LayoutB::Stride::Index ldb{};
typename LayoutC::Stride::Index ldc{};
typename LayoutC::Stride::Index ldd{};
typename LayoutC::Stride::Index ldr{};
typename LayoutC::Stride::Index ldt{};
int avail_sms{-1}; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
//
// Methods
//
/// Default Constructor
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void * ptr_Vector,
void * ptr_Tensor,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_Vector,
int64_t batch_stride_Tensor,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
typename LayoutC::Stride::Index ldr,
typename LayoutC::Stride::Index ldt,
int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
:
mode(mode),
problem_size(problem_size),
batch_count(batch_split),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D),
batch_stride_Vector(batch_stride_Vector),
batch_stride_Tensor(batch_stride_Tensor),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms)
{
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
/// Parameters structure
struct Params
{
public:
//
// Data members
//
void * ptr_A{nullptr};
void * ptr_B{nullptr};
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorB::Params params_B{};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
GemmUniversalMode mode{GemmUniversalMode::kGemm};
ThreadblockSwizzle block_mapping{};
void *barrier_workspace{nullptr};
void *partials_workspace{nullptr};
typename EpilogueOutputOp::Params output_op{};
void * ptr_C{nullptr};
void * ptr_D{nullptr};
void * ptr_Tensor{nullptr};
void * ptr_Vector{nullptr};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename Epilogue::TensorTileIterator::Params params_Tensor{};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
int64_t batch_stride_Vector{0};
int64_t batch_stride_Tensor{0};
typename LayoutC::Stride::Index ldr{};
protected:
//
// Host-only dispatch-utilities
//
/// Pad the given allocation size up to the nearest cache line
static size_t cacheline_align_up(size_t size)
{
static const int CACHELINE_SIZE = 128;
return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}
/// Get the workspace size needed for barrier
size_t get_barrier_workspace_size() const
{
// For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
// each reduction block needs its own synchronization flag.
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
}
/// Get the workspace size needed for intermediate partial sums
size_t get_partials_workspace_size() const
{
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
}
public:
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
params_Tensor(args.ldt),
output_op(args.epilogue),
mode(args.mode),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
batch_stride_Vector(args.batch_stride_Vector),
batch_stride_Tensor(args.batch_stride_Tensor),
barrier_workspace(nullptr),
partials_workspace(nullptr)
{
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
// Number of SMs to make available for StreamK decomposition
int avail_sms = (args.avail_sms == -1) ?
device_sms :
fast_min(args.avail_sms, device_sms);
CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms);
// Initialize the block mapping structure
block_mapping = ThreadblockSwizzle(
args.mode,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count,
sm_occupancy,
device_sms,
avail_sms,
sizeof(ElementA),
sizeof(ElementB),
sizeof(ElementC),
Epilogue::kAccumulatorFragments);
}
/// Returns the workspace size (in bytes) needed for these parameters
size_t get_workspace_size() const
{
return
get_barrier_workspace_size() +
get_partials_workspace_size();
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
uint8_t *ptr = static_cast<uint8_t*>(workspace);
// Establish partials workspace
partials_workspace = nullptr;
size_t partials_workspace_bytes = get_partials_workspace_size();
if (partials_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
partials_workspace = ptr;
ptr += partials_workspace_bytes;
}
// Establish barrier workspace
barrier_workspace = nullptr;
size_t barrier_workspace_bytes = get_barrier_workspace_size();
if (barrier_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
barrier_workspace = ptr;
ptr += barrier_workspace_bytes;
}
// Zero-initialize barrier workspace
if (barrier_workspace)
{
size_t barrier_workspace_bytes = get_barrier_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
cudaError_t result = cudaMemsetAsync(
barrier_workspace,
0,
barrier_workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
cutlass::gemm::GemmCoord get_tiled_shape() const
{
return block_mapping.tiled_shape();
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return block_mapping.get_grid_dims();
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
CUTLASS_HOST_DEVICE
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_Vector = args.ptr_Vector;
ldr = args.ldr;
ptr_Tensor = args.ptr_Tensor;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
batch_stride_Vector = args.batch_stride_Vector;
batch_stride_Tensor = args.batch_stride_Tensor;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
}
};
/// Tile work descriptor
struct TileWorkDesc
{
/// The linear tile index
int tile_idx;
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
cutlass::gemm::GemmCoord tiled_coord;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
int iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_begin;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_end;
/// The number of remaining MAC-iterations this threadblock will perform for this tile
int k_iters_remaining;
// Whether this block will perform the first iteration of this tile
CUTLASS_DEVICE
bool tile_started()
{
return (k_begin == 0);
}
// Whether this block will perform the last iteration of this tile
CUTLASS_DEVICE
bool tile_finished(Params const ¶ms)
{
return (k_end == params.block_mapping.problem_size.k());
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
protected:
//
// Data members
//
/// GEMM problem parameters
Params const ¶ms;
/// Shared storage reference
SharedStorage &shared_storage;
/// ID within the threadblock
int thread_idx;
/// ID of warp
int warp_idx;
/// ID of each thread within a warp
int lane_idx;
/// Threadblock scoped epilogue
Epilogue epilogue;
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
protected:
//
// Device-only utility methods
//
/// Iterator for fetching tile fragments from A
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input A matrix
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
}
if (mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
}
int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
int m_end = params.block_mapping.problem_size.m();
return Mma::IteratorA(
params.params_A,
ptr_A,
{ m_end, tile_work.k_end },
threadIdx.x,
{ m_begin, tile_work.k_begin });
}
/// Iterator for fetching tile fragments from B
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(
TileWorkDesc &tile_work,
GemmUniversalMode mode)
{
// The input B matrix
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
// Update input pointers based on batched/array mode
if (mode == GemmUniversalMode::kBatched) {
ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
}
if (mode == GemmUniversalMode::kArray) {
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
}
int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
int n_end = params.block_mapping.problem_size.n();
return Mma::IteratorB(
params.params_B,
ptr_B,
{ tile_work.k_end, n_end },
threadIdx.x,
{ tile_work.k_begin, n_begin });
}
CUTLASS_DEVICE
void init_dp_tile_work(
TileWorkDesc &tile_work,
int tile_idx)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = 0;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = params.block_mapping.problem_size.k();
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
CUTLASS_DEVICE
void init_sk_tile_work(
TileWorkDesc &tile_work,
int tile_idx,
int block_iter_begin,
int block_iter_end)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration for this tile
int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
// The first tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
// The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_end = block_iter_end - tile_iter_begin;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = min(
params.block_mapping.problem_size.k(), // extent of k domain
(k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
/// Share accumulators with peers
CUTLASS_DEVICE
void share_accumulators(
AccumulatorTile const &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
int accum_tile_offset = first_block_idx * kThreadCount;
if (block_idx == first_block_idx)
{
// First peer initializes the workspace partials
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
else
{
// Subsequent peers atomically accumulate into the workspace partials
if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
{
// Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
}
else
{
// Turnstile reduction order: wait until the previous peer has written
int wait_count = block_idx - first_block_idx;
Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
}
// Perform reduction in workspace
BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
// Signal our arrival
Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
/// Acquire accumulators from peers
CUTLASS_DEVICE
void acquire_accumulators(
AccumulatorTile &accumulator_tile,
int block_idx,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
// Wait for arrival
int num_carry_in = block_idx - first_block_idx;
Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
// Load and add peer-partials accumulator tile to local accumulator tile
int accum_tile_offset = first_block_idx * kThreadCount;
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
/// Perform epilogue computations and output
CUTLASS_DEVICE
void do_epilogue(
TileWorkDesc &tile_work,
AccumulatorTile &accumulator_tile)
{
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C;
ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
if (ptr_Tensor) {
ptr_Tensor = ReferenceFactory<typename Epilogue::ElementTensor>::add_pointer_offset(
ptr_Tensor,
tile_work.tiled_coord.k() * params.batch_stride_Tensor);
}
if (ptr_Vector) {
ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector;
}
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
if (ptr_Tensor) {
ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[tile_work.tiled_coord.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[tile_work.tiled_coord.k()];
}
}
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tile_work.tiled_coord.m() * Mma::Shape::kM,
tile_work.tiled_coord.n() * Mma::Shape::kN
);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
ptr_Tensor,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
EpilogueOutputOp(params.output_op),
ptr_Vector,
iterator_D,
accumulator_tile,
iterator_C,
tensor_iterator,
params.block_mapping.problem_size.mn(),
threadblock_item_begin);
}
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
// Reduce by sk-tile (every tile contributed to by one or more blocks)
reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;
peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
// Wait for peers to complete
int peer_idx_end = peer_idx_last + 1;
int num_peers = peer_idx_end - peer_idx_begin;
Barrier::wait_eq_reset(
params.barrier_workspace,
thread_idx,
(reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
num_peers);
    // The location of this tile (in threadblock-tile coordinates) in the output matrix
GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tiled_coord.m() * Mma::Shape::kM,
tiled_coord.n() * Mma::Shape::kN
);
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
ptr_Tensor,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue.reduce(
peer_idx_begin,
peer_idx_end,
reduce_fragment_idx,
params.partials_workspace,
EpilogueOutputOp(params.output_op),
ptr_Vector,
iterator_D,
iterator_C,
tensor_iterator,
params.block_mapping.problem_size.mn(),
threadblock_item_begin);
}
CUTLASS_DEVICE
void process_tile(
TileWorkDesc tile_work,
int block_idx,
int dp_start_block_idx,
int block_iter_begin)
{
// Initialize input iterators
typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);
// Initialize accumulators
AccumulatorTile accumulator_tile;
accumulator_tile.clear();
// Initialize MMA abstraction
Mma mma(
shared_storage.main_loop,
thread_idx,
warp_idx,
lane_idx);
// Perform this tile's range of multiply-accumulate (MAC) iterations
mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
(params.block_mapping.reduction_blocks == 0) ||
(block_idx >= dp_start_block_idx))
{
//
// Cooperative SK peer reduction or DP block
//
int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
if (!tile_work.tile_finished(params)) {
// Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
share_accumulators(accumulator_tile, block_idx, first_block_idx);
}
else
{
// DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
if (!tile_work.tile_started())
{
// A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
}
do_epilogue(tile_work, accumulator_tile);
}
}
else
{
//
// Separate peer reduction
//
// Share accumulator partial sums with peer threadblock(s) through scratch workspace
epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
// Signal arrival
Barrier::arrive_range_inc(
params.barrier_workspace,
thread_idx,
tile_work.tile_idx * Epilogue::kAccumulatorFragments,
Epilogue::kAccumulatorFragments);
}
}
/// Executes one GEMM
CUTLASS_DEVICE
void gemm()
{
// Initialize block's iteration range
int tile_idx = 0;
int block_iter_begin = 0;
int block_iters_remaining = 0;
int block_idx = params.block_mapping.get_block_idx();
int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
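    // Block indices are laid out as [SK blocks | SK padding | DP blocks | reduction blocks | grid padding],
    // delimited by the start indices computed above.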
// Initialize tile work descriptor
TileWorkDesc tile_work;
bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
bool sk_block = (block_idx < sk_padding_start_block_idx);
bool reduce_block = (block_idx >= reduce_start_block_idx) &&
(block_idx < grid_padding_start_block_idx) &&
(ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);
if (dp_block)
{
// This is a DP block
int dp_block_idx = block_idx - dp_start_block_idx;
int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
// Blocks in first DP wave get configured number of tiles
tile_idx = first_dp_tile + dp_block_idx;
      int tile_allotment = params.block_mapping.dp_first_wave_tiles;
      // Blocks in subsequent DP waves get 1 tile
      if (dp_block_idx >= params.block_mapping.avail_sms) {
        tile_allotment = 1;
        tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
      }
      block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allotment;
init_dp_tile_work(tile_work, tile_idx);
// DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
if ((tile_idx < params.block_mapping.sk_tiles) ||
(tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
(tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
{
return;
}
}
else if (sk_block)
{
// This is a SK block
int block_iter_end;
params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
block_iters_remaining = block_iter_end - block_iter_begin;
tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
else
{
if (reduce_block)
{
// This is a reduction threadblock
int reduce_block_idx = block_idx - reduce_start_block_idx;
separate_reduction(reduce_block_idx);
}
return;
}
// Iteration-processing loop body
CUTLASS_PRAGMA_NO_UNROLL
while (true)
{
// Perform this block's share of work for this tile
process_tile(
tile_work,
block_idx,
dp_start_block_idx,
block_iter_begin);
block_iters_remaining -= tile_work.k_iters_remaining;
if (block_iters_remaining == 0)
{
break;
}
// Continue to next tile
__syncthreads();
if (block_idx >= dp_start_block_idx)
{
        // DP blocks consume their tiles at a stride of avail_sms
tile_idx += params.block_mapping.avail_sms;
init_dp_tile_work(tile_work, tile_idx);
}
else
{
// SK blocks consume their tiles in backwards order
tile_idx--;
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
}
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmStreamkWithFusedEpilogue op(params, shared_storage);
op();
}
// Constructor
CUTLASS_DEVICE
GemmStreamkWithFusedEpilogue(
Params const ¶ms,
SharedStorage &shared_storage)
:
params(params),
shared_storage(shared_storage),
thread_idx(threadIdx.x),
    warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)),   // broadcast the warp_id computed by lane 0 to ensure dependent code is compiled as warp-uniform
lane_idx(threadIdx.x % 32),
epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx)
{}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()() {
// Generic SK code path
gemm();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Problem visitor for grouped Rank2K operations.
This problem visitor is specialized for Rank2K operations, for which matrix C is upper/lower
triangular. Using a problem visitor designed for GEMMs for Rank2K problems is inefficient
because threadblocks will be frequently assigned to tiles that exit early (e.g., due to
being assigned to a tile in the upper-triangular portion of a lower-triangular problem).
This can lead to load imbalance among threadblocks, as the GEMM-based scheduler
assigns all threadblocks to nearly the same number of tiles, regardless of whether
those tiles exit early.
Consider an example of a group of four Rank2Ks with matrix C consisting of a grid of 2x2 tiles.
Consider a grid of 8 threadblocks. The default GEMM scheduler will assign threadblocks to
tiles in the following order:
Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3
0 1 4 5 0 1 4 5
2 3 6 7 2 3 6 7
Assuming that the problems are lower triangular, blocks 1 and 5 are continuously assigned
to inactive tiles.
This problem visitor aims to assign threadblocks to only those tiles which are in the
upper/lower triangular portion of a given problem. Using the example above, the resulting
assignment would be:
Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3
0 - 3 - 6 - 1 -
1 2 4 5 7 0 2 3
Achieving the schedule above requires a mapping from threadblock ID to tile coordinates (i, j).
We will illustrate this by mapping on a lower-triangular matrix with a 3x3 grid. We first
calculate row and column indices assuming one-indexed rows, tiles, and threadblock IDs, and
then subtract one to convert to zero-indexed.
Col 1 Col 2 Col 3
----------------------
Row 1 | 1 - -
Row 2 | 2 3 -
Row 3 | 4 5 6
We next outline this mapping, borrowing from: https://stackoverflow.com/a/40954159
Calculating row i given threadblock ID t
----------------------------------------
For a given row i, all threadblock IDs t in that row satisfy the following:
t <= 1 + 2 + 3 + ... + (i-1) + i
The closed-form equation for the right-hand side is: i(i+1)/2.
Using this, we can solve for i given t:
t <= i(i+1)/2
2t <= i^2 + i
2t <= i^2 + i + 0.25 - 0.25
2t + 0.25 <= i^2 + i + 0.25
2t + 0.25 <= (i + 0.5)^2
sqrt(2t + 0.25) - 0.5 <= i
To account for fractional values, we set:
i = ceil(sqrt(2t + 0.25) - 0.5)
To turn this into a zero-indexed row and work with zero-indexed t, we perform:
i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1
= ceil(sqrt(2t + 2.25) - 0.5) - 1
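    For example, zero-indexed threadblock t = 4 gives
        i = ceil(sqrt(2*4 + 2.25) - 0.5) - 1 = ceil(2.70) - 1 = 2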
Calculating column j given threadblock ID t and row i
-----------------------------------------------------
For a given row i, all threadblock IDs t in that row also satisfy the following:
t > 1 + 2 + 3 + ... + (i-2) + (i-1)
--> t > i(i-1)/2
Threadblock IDs within a given row are sequential, so the one-indexed column ID
for one-indexed threadblock ID t and row i is:
j = t - (i(i-1)/2)
The zero-indexed version becomes:
j = (t+1) - (i(i+1)/2) -1
= t - (i(i+1)/2)
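    Continuing the example above, t = 4 and i = 2 give
        j = 4 - (2*3/2) = 1
    placing threadblock 4 at zero-indexed tile (2, 1), which matches the 3x3 diagram above
    (one-indexed threadblock 5 in Row 3, Col 2).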
Accounting for non-square grids
-------------------------------
    Though the overall output problem size for Rank2K problems is guaranteed to be square, the
grids used in computing may not be square due to using non-square threadblock shapes. For
example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would
result in a grid of 2x4 tiles.
This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles"
each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile"
using the equations above, and then map it to the "true tile" within its "macro tile." In the example
of a 2x4 grid, this mapping would look as follows:
"Macro grid" "True grid"
{0, 1} - 0 1 - -
{2, 3} {4, 5} 2 3 4 5
A zero-indexed threadblock ID t is mapped to its "macro tile ID" t_macro as:
t_macro = t // r
Where r is the ratio of the maximum dimension of the grid to the minimum dimension of the grid
(i.e., r = 4 / 2 = 2 in the previous example).
One uses t_macro and the calculations above to find the row and column in the square matrix to
obtain i_macro and j_macro (zero-indexed). The mapping from (i_macro, j_macro) --> (i, j)
is simply the following:
if (ThreadblockShape::M > ThreadblockShape::N):
r = ThreadblockShape::M / ThreadblockShape::N
i = i_macro
j = (j_macro * r) + (t % r)
elif (ThreadblockShape::M < ThreadblockShape::N):
r = ThreadblockShape::N / ThreadblockShape::M
i = (i_macro * r) + (t % r)
j = j_macro
else:
i = i_macro
j = j_macro
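    As a worked example using the 2x4 grid above (ThreadblockShape::M > ThreadblockShape::N, so r = 2),
    threadblock t = 5 gives t_macro = 2 and (i_macro, j_macro) = (1, 1), and therefore
        i = 1,  j = (1 * 2) + (5 % 2) = 3
    which matches the position of 5 in the "True grid" illustration.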
    Handling cases with grid dimensions that aren't multiples of each other
----------------------------------------------------------------------
Even though threadblock shapes M and N are typically multiples of one another, the grid
for a given problem may not have dimensions of the same ratio as that of the threadblock.
For example, a problem of size 132x132 using a threadblock of shape 64x32 will result
in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles"
per "macro tile."
When this scenario arises, we simply pad the larger dimension of the grid such that
there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in
the example above will be treated as a 3x6 grid. Row and column positions for each
tile are calculated as above. Any threadblocks that map to tiles that are outside the
problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from
this problem and may proceed to the next problem in the group.
Handling upper-triangular matrices
----------------------------------
The only modification needed for upper-triangular matrices is to swap i_macro and j_macro
in the calculations above.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Helpers for calculating offsets for Rank2K problem visitor. These helpers specifically pertain
// to the conversion from "macro tiles" to "true tiles" in the description above.
//
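// For example, a 64x32 threadblock shape selects the M > N specialization below with
// kThreadblockSkewRatio = 2: macro rows map directly to rows, while the true column is
// (macro_col * 2) + (threadblock_id % 2).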
template <
typename ThreadblockShape,
typename Enable = void
>
struct Rank2KGroupedProblemVisitorOffsetHelper;
// Partial specialization for the case where threadblock shape M > threadblock shape N
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM > ThreadblockShape::kN) >::type
> {
static_assert(ThreadblockShape::kM % ThreadblockShape::kN == 0,
"Rank2KGroupedProblemVisitor with threadblock shape M > threadblock shape N "
"requires that threadblock shape M be a multiple of threadblock shape N.");
static int32_t const kThreadblockSkewRatio = ThreadblockShape::kM / ThreadblockShape::kN;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.m();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return row;
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return (col * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio);
}
};
// Partial specialization for the case where threadblock shape M < threadblock shape N
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM < ThreadblockShape::kN) >::type
> {
static_assert(ThreadblockShape::kN % ThreadblockShape::kM == 0,
"Rank2KGroupedProblemVisitor with threadblock shape M < threadblock shape N "
"requires that threadblock shape N be a multiple of threadblock shape M.");
static int32_t const kThreadblockSkewRatio = ThreadblockShape::kN / ThreadblockShape::kM;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.n();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return (row * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio);
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return col;
}
};
// Partial specialization for the case where threadblock shape M == threadblock shape N
// In this case, macro tiles are equivalent to true tiles, so the conversions are
// identity functions.
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM == ThreadblockShape::kN) >::type
> {
static int32_t const kThreadblockSkewRatio = 1;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.m();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return row;
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return col;
}
};
// Helper for correctly representing problem sizes in grouped kernels
template <typename ThreadblockShape>
struct Rank2KGroupedProblemSizeHelper {
using OffsetHelper = Rank2KGroupedProblemVisitorOffsetHelper<ThreadblockShape>;
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
// Return the number of tiles at or below the diagonal (or at and above
// for mode kUpper). We do this by first calculating this value assuming
// we have a square matrix of tiles of size `dim x dim` where `dim` is the
// minimum among {grid.m(), grid.n()}. We then multiply the resulting value
// by OffsetHelper::kThreadblockSkewRatio to account for cases in which there
// are more tiles in one dimension than the other.
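    // For example, a square 3x3 grid of tiles yields 3 + 3 = 6 tiles, matching the
    // lower-triangular example in the file-level comment.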
int32_t dim = OffsetHelper::min_dim(grid);
int32_t tiles_on_diagonal = dim;
int32_t tiles_below_diagonal = ((dim * (dim - 1)) / 2);
return (tiles_on_diagonal + tiles_below_diagonal) * OffsetHelper::kThreadblockSkewRatio;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Default problem visitor for fill modes kUpper and kLower.
//
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
cutlass::FillMode FillModeC>
struct Rank2KGroupedProblemVisitor : public GroupedProblemVisitor<
detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static cutlass::FillMode const kFillModeC = FillModeC;
static_assert(kFillModeC == cutlass::FillMode::kLower || kFillModeC == cutlass::FillMode::kUpper,
"Default Rank2KGroupedProblemVisitor requires fill mode of kLower or kUpper.");
using ProblemSizeHelper = detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>;
using Base = GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount>;
using OffsetHelper = typename ProblemSizeHelper::OffsetHelper;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
Rank2KGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, shared_storage_, block_idx)
{}
CUTLASS_DEVICE
cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const {
int32_t macro_id = threadblock_id / OffsetHelper::kThreadblockSkewRatio;
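    // Invert the triangular-number mapping described in the file-level comment:
    //   macro_row = ceil(sqrt(2 * macro_id + 2.25) - 0.5) - 1
    //   macro_col = macro_id - macro_row * (macro_row + 1) / 2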
int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1;
int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2);
if (kFillModeC == cutlass::FillMode::kUpper) {
swap(macro_row, macro_col);
}
int32_t row = OffsetHelper::macro_row_to_row(macro_row, threadblock_id);
int32_t col = OffsetHelper::macro_col_to_col(macro_col, threadblock_id);
return cutlass::gemm::GemmCoord(row, col, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined sparse GEMM kernel. Does not compute batching; split-K via serial
           reduction is supported when SplitKSerial is enabled.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_sparse_base.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct SparseGemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ParamsA = typename Mma::IteratorA::Params;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using ParamsB = typename Mma::IteratorB::Params;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using ParamsE = typename Mma::IteratorE::Params;
using TensorRefE = typename Mma::IteratorE::TensorRef;
/// Parameters structure
struct Params : public SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE> {
using Base = SparseParamsBase<
ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB,
ParamsE, TensorRefE>;
//
// Data members
//
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
TensorRefA ref_A,
TensorRefB ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
TensorRefE ref_E,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
Base(problem_size, grid_tiled_shape, ref_A, ref_B, ref_E, Mma::Shape::kK),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op) {
semaphore = workspace;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
SparseGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_E, kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) ||
(problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
    // The k dimension has to be a multiple of the threadblock K because out-of-bound
    // metadata would be zero-filled by cp.async's zfill behavior, but 0 is not valid metadata.
if (problem_size.k() % Mma::Shape::kK) {
return Status::kErrorMisalignedOperand;
}
    // The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
    // because of the row reordering of operand E.
static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;
if (problem_size.m() % kAlignmentM) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A, B, and E operands
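    // Operand A and the metadata E are stored in compressed form, so their K extents are
    // problem_size.k() / kSparse (further divided by kElementsPerElementE for E).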
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E, params.ref_E.data(),
{params.problem_size.m(),
problem_size_k / kSparse / kElementsPerElementE},
thread_idx, tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/gemm/kernel/sparse_gemm.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting simt instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
namespace detail {
// Convert a WarpShape (the warp's whole tile of elements) into the number of threads
// along the M dimension. The goal is for each thread's tile of elements to be as square
// as possible for performance (4x4 is faster than 2x8).
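// For example, a 64x32 warp tile selects 8 threads along M (and thus 4 along N),
// giving each thread an 8x8 tile of elements.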
template<typename WarpShape>
constexpr int simt_get_warp_threads_m() {
return (WarpShape::kM > WarpShape::kN) ? 8 : 4;
}
/// Computes padding in shared memory to perform efficient transpose without bank conflicts.
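/// For instance, 32 threads with crosswise = 8 and 32-bit elements give a padding of 32 / 8 / 1 = 4 elements.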
constexpr int simt_transpose_padding(int threads, int crosswise, int size_in_bits) {
return (size_in_bits >= 32 ?
threads / crosswise / (size_in_bits / 32) :
threads / crosswise * (32 / size_in_bits)
);
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // LaneMmaShape dimensions are capped at the thread tile size via the const_min above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
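  // Both operands are transposed when written to shared memory (A from row-major to
  // column-major, B from column-major to row-major); the transposing thread maps and the
  // padding terms below keep those stores free of shared-memory bank conflicts.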
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA // was IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
    SmemThreadMapB // was IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // LaneMmaShape dimensions are capped at the thread tile size via the const_min above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static_assert(!(kPaddingM % LaneM),
"Padding must be divisible by Lane");
  // LaneMmaShape dimensions are capped at the thread tile size via the const_min above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // The lane-level MMA shape is bounded by the thread tile size (enforced by const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
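//
// Example (illustrative only): a minimal sketch of how a SIMT DefaultMmaCore in this
// family of specializations might be instantiated. The tile sizes, element types, and
// the OpMultiplyAdd operator below are assumptions chosen for illustration, not values
// mandated by this header.
//
//   using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,       // SIMT "instruction" shape
//       float, cutlass::layout::ColumnMajor,     // A
//       float, cutlass::layout::ColumnMajor,     // B
//       float, cutlass::layout::RowMajor,        // C / accumulator
//       cutlass::arch::OpClassSimt,
//       2,                                       // pipeline stages
//       cutlass::arch::OpMultiplyAdd>;
//
//   // MmaCore::MmaPolicy, MmaCore::SmemIteratorA, MmaCore::SmemIteratorB, etc. are
//   // then consumed by a threadblock-scoped pipelined MMA.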
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::ColumnMajor, int8_t, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::ColumnMajor;
using ElementB = int8_t;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is bounded by the thread tile size (enforced by const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
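//
// Example (illustrative only): a hypothetical int8 dp4a instantiation that would select
// the specialization above. The tile sizes are assumptions for illustration; the
// GemmShape<1, 1, 4> instruction shape is what routes to the dp4a path.
//
//   using MmaCoreDp4a = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
//       cutlass::gemm::GemmShape<32, 64, 32>,    // warp tile
//       cutlass::gemm::GemmShape<1, 1, 4>,       // dp4a instruction shape
//       int8_t,  cutlass::layout::ColumnMajor,   // A
//       int8_t,  cutlass::layout::RowMajor,      // B
//       int32_t, cutlass::layout::RowMajor,      // C / accumulator
//       cutlass::arch::OpClassSimt,
//       2,                                       // pipeline stages
//       cutlass::arch::OpMultiplyAdd>;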
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: Row-major
/// B: Column-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::RowMajor, int8_t, layout::ColumnMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is bounded by the thread tile size (enforced by const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>,
MatrixShape<0, kPaddingN>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: Row-major
/// B: Row-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::RowMajor, int8_t, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using ElementB = int8_t;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is bounded by the thread tile size (enforced by const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: Column-major
/// B: Column-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::ColumnMajor, int8_t, layout::ColumnMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::ColumnMajor;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is bounded by the thread tile size (enforced by const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, kPaddingN>,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_simt.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_simt.h",
"repo_id": "include",
"token_count": 20761
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
    It loads two loop-invariant vectors, norm and sum, in the prologue and
    stores them in the register file. An elementwise operation then applies the
    norm and sum between the ldmatrix loads and the warp-level MMA.
*/
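// A rough sketch of the control flow implemented below (informal pseudocode, not part
// of the API):
//
//   prologue:
//     load the norm/sum vectors once into registers
//     issue cp.async copies for the first (kStages - 1) threadblock tiles
//   mainloop, per warp-level k-group:
//     load A and B fragments from shared memory (ldmatrix)
//     transform fragments; apply norm/sum to the softmax operand
//     warp-level MMA accumulate
//     issue cp.async copies for the next stage and advance the circular buffer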
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/gemm/warp/softmax_scale_bias_transform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class MmaMainloopFusionBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
  /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
};
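  // Worked example (assumed values, for illustration only): with
  // Shape = GemmShape<128, 128, 32>, kStages = 3, and zero SMEM padding,
  //   ShapeA = 128 x (32 * 3) = 128 x 96  ->  12288 elements of ElementA
  //   ShapeB = (32 * 3) x 128 = 96 x 128  ->  12288 elements of ElementB
  // For half-precision operands this is 24 KB of shared memory per operand buffer.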
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaMainloopFusionBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
    /// Iterates over the norm and sum vectors in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorNormSum_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Whether problem has been transformed. This determines to which operand
/// the softmax is applied.
bool InternalTranspose,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaSoftmaxMainloopFusionMultistage :
public MmaMainloopFusionBase<Shape_, Policy_, Stages> {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
  ///< Iterates over tiles of the norm and sum vectors in global memory
using IteratorNormSum = IteratorNormSum_;
///< Policy describing tuning details
using Policy = Policy_;
///< Base class
using Base = MmaMainloopFusionBase<Shape_, Policy, Stages>;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
    /// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
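  // Worked example (assumed values, for illustration only): if
  // AsyncCopyIterationsPerStageA = 4 and Base::kWarpGemmIterations = 8, then
  //   kAccessesPerGroupA = (4 + 8 - 1) / 8 = 1,
  // i.e. the ceiling of 4/8, so the per-stage copies are spread across the
  // warp-level MMA iterations with one access group issued per iteration.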
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
using WarpLoadedFragmentNormSum = typename IteratorNormSum::Fragment;
static bool const kInternalTranspose = InternalTranspose;
using SoftmaxFragment = typename platform::conditional<kInternalTranspose,
WarpTransformedFragmentB,
WarpTransformedFragmentA>::type;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
int warp_idx_m_;
int warp_idx_n_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaSoftmaxMainloopFusionMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM;
warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_});
}
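  // Worked example (assumed values, for illustration only): with
  // WarpCount = <2, 2, 1> there are 4 warps per threadblock. For warp_idx = 3:
  //   warp_idx_mn = 3 % (2 * 2) = 3,   warp_idx_k = 3 / (2 * 2) = 0,
  //   warp_idx_m_ = 3 % 2 = 1,         warp_idx_n_ = 3 / 2 = 1,
  // i.e. this warp owns the (m = 1, n = 1) warp tile of the threadblock tile.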
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
}
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
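  // Worked example (assumed values, for illustration only): for a half-precision
  // operand with ThreadMap::kElementsPerAccess = 8 and kAccessesPerVector = 1,
  //   kSrcBytes = 16 * 8 / 1 / 8 = 16 bytes,
  // i.e. each cp.async above moves one 128-bit vector per access.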
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
      ///< iterator over the norm and sum vectors in global memory
IteratorNormSum iterator_norm_sum,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
WarpLoadedFragmentNormSum warp_loaded_frag_norm_sum;
iterator_norm_sum.add_tile_offset({0, warp_idx_m_});
iterator_norm_sum.load(warp_loaded_frag_norm_sum);
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
cutlass::gemm::warp::SoftmaxScaleBiasTransform<
SoftmaxFragment, WarpLoadedFragmentNormSum> elementwise_transform;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
if (kInternalTranspose) {
elementwise_transform(warp_transformed_frag_B[0],
warp_loaded_frag_norm_sum);
} else {
elementwise_transform(warp_transformed_frag_A[0],
warp_loaded_frag_norm_sum);
}
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0) {
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
if (kInternalTranspose) {
elementwise_transform(warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_norm_sum);
} else {
elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_loaded_frag_norm_sum);
}
}
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
if (kInternalTranspose) {
elementwise_transform(warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_norm_sum);
} else {
elementwise_transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_norm_sum);
}
}
}
}
    // Commit and drain all pending and predicated cp.async pnz from the GEMM
    // mainloop. This unconditional drain also covers the
    // SharedMemoryClearOption::kZfill case, so no separate branch is needed.
    cutlass::arch::cp_async_fence();
    cutlass::arch::cp_async_wait<0>();
    __syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h",
"repo_id": "include",
"token_count": 11315
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 128b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous128b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 8) && !(Shape::kStrided % 4), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 128, "This is specialized for 128b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous128b;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
  /// Load one element per access
static int const kElementsPerAccess = 1;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
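  // Worked example of the Policy arithmetic above (illustrative numbers only): with
  // Shape = PitchLinearShape<64, 8> and InstructionShape::kStrided = 4, Delta = <8, 4>
  // yields Iterations = <64 / 8, 4 / 4> = <8, 1>, i.e. eight 128b accesses per load(),
  // matching the Fragment size of 64 * 4 / 32 = 8 elements per thread.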
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) {
int quad_pair = lane_id / 8;
int quad = lane_id / 4;
int lane = lane_id % 4;
int row = (quad & 1) * 4 + (lane ^ quad_pair);
byte_offset_ = (row + quad_pair * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
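  // Example of the lane mapping above (illustrative): for lane_id = 13, quad_pair = 1,
  // quad = 3 and lane = 1, so row = (3 & 1) * 4 + (1 ^ 1) = 4 and
  // byte_offset_ = (4 + 1 * stride_) * sizeof(AccessType); each lane therefore starts at
  // a distinct 128b access within its quad-pair's slice of the shared-memory tile.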
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.contiguous() * Shape::kContiguous) +
(tile_offset.strided() * InstructionShape::kStrided * stride_);
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kStrided;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
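//
// Minimal usage sketch for the iterator above (a hedged illustration, not taken from a
// specific kernel; the shapes are placeholder values, and smem_ref / lane_id are assumed
// to come from the surrounding warp-level code):
//
//   using Iterator = MmaTensorOpMultiplicandTileIterator<
//       layout::PitchLinearShape<32, 16>,            // warp tile: contiguous x strided
//       Operand::kA, complex<double>,                // complex<double> is a 128b element
//       layout::TensorOpMultiplicandCongruous128b,
//       layout::PitchLinearShape<8, 4>, 1, 32, 1>;
//
//   Iterator iter(smem_ref, lane_id);                // smem_ref: TensorRef into shared memory
//   typename Iterator::Fragment frag;
//   iter.load(frag);                                 // load this thread's part of one k-group
//   ++iter;                                          // advance along the strided (k) dimension
//   iter.load(frag);
//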
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous128b,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
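//
// Descriptive note (added for clarity): the adapter above only re-maps logical
// MatrixCoord coordinates onto the pitch-linear Base iterator. For a row-major
// operand the column index is the contiguous dimension, so an offset of
// (row, column) is forwarded to Base as (column, row); the ColumnMajor adapter
// below forwards (row, column) unchanged because the row index is contiguous there.
//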
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous128b,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for complex<T>
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of underlying field of reals.
typename RealElement,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
Shape_, complex<RealElement>, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = complex<RealElement>;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile. It is assumed that the accumulators
/// are stored in a planar complex arrangement with the real parts as entirely contiguous
/// followed by the imaginary parts.
using Fragment = Array<RealElement, Shape::kCount / kThreads * 2>;
static int const kRealIndex = 0;
static int const kImaginaryIndex = Shape::kCount / kThreads;
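  // Worked example of the planar-complex layout above (illustrative numbers only): with
  // Shape = <64, 64> and kThreads = 32, each thread holds 64 * 64 / 32 = 128 complex
  // accumulators stored as 256 reals; element i's real part lives at frag[kRealIndex + i]
  // and its imaginary part at frag[kImaginaryIndex + i] = frag[128 + i].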
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
Element z = offset_ref.at({accum_m, accum_n});
frag[mma_accum_start + row * kElementsPerAccess + col + kRealIndex] = z.real();
frag[mma_accum_start + row * kElementsPerAccess + col + kImaginaryIndex] = z.imag();
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
    Fragment const &frag,                       ///< fragment to store to the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
Element z(frag[kRealIndex + idx], frag[kImaginaryIndex + idx]);
offset_ref.at({accum_m, accum_n}) = z;
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
    Fragment const &frag,                       ///< fragment to store to the tensor
    Index byte_offset) const {                  ///< store a tile with a linear offset in units of bytes
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                       ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
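//
// Minimal usage sketch for the accumulator iterator above (a hedged illustration; the
// warp shape, instruction shape, ref_C, and lane_id below are placeholder assumptions):
//
//   using AccumIterator = MmaTensorOpAccumulatorTileIterator<
//       MatrixShape<64, 64>, complex<float>, layout::RowMajor,
//       GemmShape<16, 8, 8>, MatrixShape<1, 1>>;
//
//   AccumIterator accum_it(ref_C, lane_id);          // ref_C: TensorRef to the output tile
//   typename AccumIterator::Fragment accum;
//   accum_it.load(accum);                            // gather planar-complex accumulators
//   // ... apply epilogue scaling to accum ...
//   accum_it.store(accum);                           // scatter them back to memory
//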
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 128b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCrosswise128x4,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 8), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 128, "This is specialized for 128b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCrosswise128x4;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
  /// Load one element per access
static int const kElementsPerAccess = 1;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 8>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) {
int quad = lane_id / 4;
int liq = lane_id % 4;
int c = liq + (quad & 1) * 4;
int s = (quad / 2);
byte_offset_ = (c + s * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
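  // Example of the lane mapping above (illustrative): for lane_id = 13, quad = 3 and
  // liq = 1, so c = 1 + (3 & 1) * 4 = 5, s = 3 / 2 = 1, and
  // byte_offset_ = (5 + 1 * stride_) * sizeof(AccessType); even-numbered quads cover
  // contiguous positions 0-3, odd-numbered quads cover 4-7, and each pair of quads
  // advances one step along the strided dimension.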
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
// Compute the offset in units of elements. Note, the external coordinate system is
// approximately transposed with respect to the tiled internal structure
int offset =
(tile_offset.contiguous() * InstructionShape::kContiguous) * stride_ +
(tile_offset.strided() * Shape::kStrided);
add_pointer_offset(offset);
byte_offset_ ^= (tile_offset.contiguous() & 1) * 4 * sizeof(AccessType);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
byte_offset_ ^= 4 * sizeof(AccessType);
return *this;
}
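  // Descriptive note (added for clarity): the XOR above toggles byte_offset_ by
  // 4 * sizeof(AccessType) = 64 bytes, so successive advances alternate the thread
  // between the two four-access halves of the crosswise-swizzled tile; advancing
  // twice returns it to its original half.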
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = s + c * Policy::Iterations::kStrided;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * InstructionShape::kContiguous * stride_ +
tile_offset.strided() * Shape::kStrided;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise128x4,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise128x4,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Congruous shared memory layout
// Warp-level iterators for complex<float>*complex<float> + complex<float> => complex<float>
// The underlying iterators are similar to those for MMA f64*f64 + f64 = f64
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, cutlass::complex<float>,
cutlass::layout::TensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 8), "Divisibility.");
/// Element type
using Element = cutlass::complex<float>;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
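  // Worked example of the Policy arithmetic above (illustrative numbers only): with
  // Shape = PitchLinearShape<64, 16>, kElementsPerAccess = 2 and
  // InstructionShape::kStrided = 4, Iterations = <64 / 2 / 8, 4 / 4> = <4, 1>, i.e. four
  // 128b accesses delivering the 64 * 4 / 32 = 8 complex<float> elements of one Fragment.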
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / Policy::Delta::kContiguous;
int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided;
pointer_= reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
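  // Example of the XOR-swizzled lane mapping above (illustrative): for lane_id = 13,
  // access_strided = 13 / 8 = 1 and access_contiguous = (13 % 8) ^ 1 = 4, so the lane
  // starts 4 accesses along the contiguous dimension and 1 along the strided dimension;
  // the XOR against access_strided matches the congruous shared-memory swizzle and
  // helps avoid bank conflicts.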
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess +
tile_offset.contiguous() * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
add_tile_offset({0, -1});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
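// Illustrative usage sketch (not part of the library). It shows how a warp-level consumer
// might construct and advance the congruous complex<float> iterator above; the Shape and
// InstructionShape values are assumptions chosen only to satisfy the divisibility asserts,
// not values taken from a shipping kernel.
//
//   using Shape            = cutlass::layout::PitchLinearShape<64, 16>;  // assumed warp tile
//   using InstructionShape = cutlass::layout::PitchLinearShape<16, 8>;   // assumed mma shape
//   using Iterator = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator<
//       Shape, cutlass::gemm::Operand::kA, cutlass::complex<float>,
//       cutlass::layout::TensorOpMultiplicandCongruous64b,
//       InstructionShape, 1, 32, 1>;
//
//   Iterator iter(smem_ref, lane_id);   // smem_ref is a TensorRef into shared memory
//   typename Iterator::Fragment frag;
//   iter.load(frag);                    // loads a Shape::kContiguous x InstructionShape::kStrided slice
//   ++iter;                             // advances one instruction step along the strided (k) dimension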
////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Crosswise shared memory layout
// Warp-level iterators for complex<float>*complex<float> + complex<float> => complex<float>
// The underlying iterators are similar to those for f64*f64 + f64 = f64
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, complex<float>,
cutlass::layout::TensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility.");
static_assert(sizeof_bits<complex<float>>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = complex<float>;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 16>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
};
private:
  /// Only kOpDelta == 1 is supported at present.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter for tracking K-group
Index k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / 8;
int access_contiguous = (lane_id % 8);
byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset / kElementsPerAccess;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) *
stride_ * kElementsPerAccess +
tile_offset.strided() * Shape::kStrided;
add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
if (k_group_idx_ & 1)
byte_offset_ ^= 0x40;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
// xor ptr
byte_offset_ ^= 0x40;
++k_group_idx_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = c * Policy::Iterations::kStrided + s;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s / kElementsPerAccess;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
Element *exchange_ptr = reinterpret_cast<Element *>(&frag);
    // Exchange adjacent 64b elements, but only for the second half of the fragment (the k = 8/2 .. 8 portion)
CUTLASS_PRAGMA_UNROLL
for (int i = Fragment::kElements/2; i < Fragment::kElements; i += 2) {
Element tmp = exchange_ptr[i];
exchange_ptr[i] = exchange_ptr[i + 1];
exchange_ptr[i + 1] = tmp;
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
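// Note on the crosswise advance implemented above (informal reading of the code, not
// normative documentation): each AccessType holds 2 x complex<float> = 16 bytes, so the
// XOR of byte_offset_ with 0x40 in operator++() and add_tile_offset_negative() toggles
// the lane between two 64-byte halves of a shared-memory line, i.e.
//
//   sizeof(AccessType)        == 16   // 2 x complex<float>
//   0x40 / sizeof(AccessType) == 4    // the toggle skips four 16-byte accesses
//
// while the element exchange at the end of load_with_byte_offset() swaps adjacent
// complex<float> values in the second half of the fragment so the data arrives in the
// order the complex MMA expects.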
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// End of file: include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/platform/platform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads>
class MmaVoltaTensorOpMultiplicandTileIterator;
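// The primary template above is only declared; behavior comes from the partial
// specializations that follow, selected by the shared-memory Layout. A minimal
// instantiation sketch (types and shapes are assumptions for illustration only):
//
//   using IterA = cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator<
//       cutlass::layout::PitchLinearShape<64, 4>,            // assumed warp A tile
//       cutlass::gemm::Operand::kA, cutlass::half_t,
//       cutlass::layout::VoltaTensorOpMultiplicandCongruous<16>,
//       cutlass::layout::PitchLinearShape<16, 4>, 1, 32>;    // assumed instruction shape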
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kA, Element_,
cutlass::layout::VoltaTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Shape of one individual LDS.128
// TODO: 32 and 4 are hardcoded, 32-by-4 is logical shape
using LdsShape = layout::PitchLinearShape<
32,
4
>;
// LdsShapes are arranged in the strided direction in SMEM
using LdsIterations = layout::PitchLinearShape<
InstructionShape::kStrided / LdsShape::kStrided,
Shape::kContiguous / LdsShape::kContiguous
>;
};
private:
  /// Only kOpDelta == 1 is supported at present.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount = 2;
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, Layout::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kContiguous *
InstructionShape::kStrided / kThreads * 2>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
// swizzle patterns for operandA LDS are
// 1. (tid[4] << 3) | (tid[2:0] ^ tid[4])
// 2. (tid[4] << 3) | (tid[2:0] ^ tid[4] ^ 0b10010)
int vec_row = (lane_id >> 4); // tid[4]
int vec_col = ((lane_id & 4) >> 2); // tid[2]
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
if(i == 1) {
vec_row |= 2;
}
int access_contiguous_idx = (vec_col << 2) | ((lane_id & 3) ^ vec_row);
int access_contiguous = access_contiguous_idx;
int access_strided = vec_row;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
int strided_offset = tile_offset.strided();
// To support 32x32 tile size
if (Shape::kContiguous == Policy::LdsShape::kContiguous) {
if (contiguous_offset % 2) {
AccessType const *tmp_pointer = pointer_[0];
pointer_[0] = pointer_[1];
pointer_[1] = tmp_pointer;
}
contiguous_offset = contiguous_offset / 2 * 2;
}
int offset = (strided_offset * InstructionShape::kStrided) * stride_ *
Layout::kElementsPerAccess +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsIterations::kContiguous;
AccessType const *source_ptr = pointer_[s & 1] +
Policy::LdsShape::kContiguous * c +
Policy::LdsShape::kStrided * (s / 2) * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr));
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
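// Worked example for the congruous A iterator above (numbers are assumptions chosen for
// illustration, with Element = half_t, Shape = PitchLinearShape<64, 4> and
// InstructionShape = PitchLinearShape<16, 4>):
//
//   Layout::kElementsPerAccess == 8                          // 128b access / 16b element
//   Policy::LdsIterations      == PitchLinearShape<1, 2>     // (4 / 4, 64 / 32)
//   Fragment::kElements        == 64 * 4 / 32 * 2 == 16      // half_t values per thread
//
// so each load() issues two 128-bit shared-memory loads per thread, one through each of
// the two base pointers in pointer_[kPointerCount].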
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::VoltaTensorOpMultiplicandBCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Shape of one individual LDS
// TODO: remove hardcoded 32 and 4
using LdsShape = layout::PitchLinearShape<
32,
4
>;
using LdsIterations = layout::PitchLinearShape<
Shape::kContiguous / LdsShape::kContiguous,
InstructionShape::kStrided / LdsShape::kStrided
>;
};
private:
  /// Only kOpDelta == 1 is supported at present.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, Layout::kElementsPerAccess>;
public:
//
// Derived quantities
//
  /// Fragment object holding a thread's part of a tile; requires twice the number of registers
using Fragment = Array<Element, Shape::kContiguous *
InstructionShape::kStrided / kThreads * 2>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
// swizzle pattern is (tid & (3 << 3) | (tid[1:0] ^ tid[4:3]))
int access_strided = (lane_id >> 3) & 0x3;
int access_contiguous = ((lane_id ^ (lane_id >> 3)) & 0x3);
pointer_ = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
int strided_offset = tile_offset.strided();
int offset = (strided_offset * InstructionShape::kStrided) * stride_ *
Layout::kElementsPerAccess +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
    byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
        Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsIterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::LdsShape::kContiguous / Layout::kElementsPerAccess * c +
Policy::LdsShape::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr));
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
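// Illustrative mainloop sketch (not taken from the library) showing how the congruous A
// and B iterators above are typically stepped together over k-groups; IterA, IterB,
// iter_A, iter_B, kGroupsPerTile and mma are placeholders:
//
//   typename IterA::Fragment frag_A;
//   typename IterB::Fragment frag_B;
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < kGroupsPerTile; ++k) {
//     iter_A.load(frag_A);
//     iter_B.load(frag_B);
//     ++iter_A;                            // advance by InstructionShape::kStrided along k
//     ++iter_B;
//     mma(accum, frag_A, frag_B, accum);   // warp-level mma (placeholder)
//   }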
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kA, Element_,
cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
    load_with_byte_offset(frag, tile_offset, 0);
  }
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
    load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
  }
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
    iterator_.load_with_byte_offset(
      frag,
      {tile_offset.row(), tile_offset.column()},
      byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
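// The column-major adapter above only re-maps coordinates: a logical MatrixCoord
// (row, column) tile offset becomes a pitch-linear (contiguous, strided) offset for the
// underlying iterator. A minimal sketch of the mapping it applies (illustrative only):
//
//   // column-major A operand: contiguous <- row (M), strided <- column (K)
//   PitchLinearCoord to_pitch_linear_A(MatrixCoord const &c) {
//     return PitchLinearCoord(c.row(), c.column());
//   }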
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
    load_with_byte_offset(frag, tile_offset, 0);
  }
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
    load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
  }
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
    iterator_.load_with_byte_offset(
      frag,
      {tile_offset.column(), tile_offset.row()},
      byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
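// For the row-major B adapter above the mapping is the transpose of the A adapter:
// contiguous <- column (N), strided <- row (K), as seen in add_tile_offset() and
// operator+=(). Sketch (illustrative only):
//
//   PitchLinearCoord to_pitch_linear_B(MatrixCoord const &c) {
//     return PitchLinearCoord(c.column(), c.row());
//   }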
////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaVoltaTensorOpAccumulatorTileIterator {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
/// Volta Tensor Op uses 32x32 interleaved tile
using InterleavedTile = MatrixShape<32, 32>;
static_assert(!(Shape::kRow % InterleavedTile::kRow) && !(Shape::kColumn % InterleavedTile::kColumn),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using TileIterations = MatrixShape<
Shape::kRow / InterleavedTile::kRow,
Shape::kColumn / InterleavedTile::kColumn
>;
using MmaIterations =
MatrixShape<InterleavedTile::kRow / InstructionShape::kM,
InterleavedTile::kColumn / InstructionShape::kN>;
};
private:
  // Assume the accumulator tile is a multiple of the interleaved 32x32 tile.
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename platform::conditional<
platform::is_same<Element, float>::value,
MatrixShape<2, 2>,
MatrixShape<1, 4> >::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = MatrixShape<4, 4>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kCount / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
int accum_m, accum_n;
if (platform::is_same<Element, float>::value) {
// (quad[2],quad[0])+lane_in_quad[0]
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
// (quad[1])+lane_in_quad[1]
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
(lane_in_quad & 2);
} else {
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0])
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
}
MatrixCoord lane_offset(accum_m, accum_n);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn + mma_n) *
Policy::MmaIterations::kRow + mma_m) *
kElementsPerMma;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn/2 + n;
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
frag[idx] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn + mma_n) *
Policy::MmaIterations::kRow + mma_m) *
kElementsPerMma;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn/2 + n;
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_HOST_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void store(
    Fragment const &frag,                   ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment from memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
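// Worked example for the accumulator iterator above (values are assumptions for
// illustration: Element = float, Shape = MatrixShape<64, 64>, InstructionShape =
// MatrixShape<16, 16>):
//
//   Fragment::kElements    == 64 * 64 / 32 == 128        // accumulators per thread
//   Policy::TileIterations == MatrixShape<2, 2>          // (64 / 32, 64 / 32)
//   Policy::MmaIterations  == MatrixShape<2, 2>          // (32 / 16, 32 / 16)
//   kElementsPerMma == 8, kAccumulatorPatials == 2, EleShapePerPatial == MatrixShape<2, 2>
//
// so each (tile_m, tile_n, mma_m, mma_n) step in load()/store() touches 2 * 2 * 2 == 8
// elements, and 2 * 2 * 2 * 2 * 8 == 128 matches the fragment size.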
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// KBlock size (in units of elements)
int KBlock>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::VoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, KBlock>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaVoltaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kKBlock = KBlock;
/// Layout of source tile
using Layout = cutlass::layout::VoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kKBlock>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
/// Shape of one individual LDS instruction
using LdsShape = layout::PitchLinearShape<1, 32>;
    /// Number and arrangement of LDS instructions
using LdsIterations = layout::PitchLinearShape<1, Shape::kStrided / 32>;
/// Using LDS.128
static int const kElementsPerAccess = 8;
/// Contiguous elements per line
static int const kContiguousElementsPerLine = 4;
};
private:
  /// Alternative OpDelta arrangements are not currently supported.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element,
Shape::kStrided * InstructionShape::kContiguous / kThreads * 2>;
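  // As an illustration (values assumed, not fixed by this header): a tile with
  // Shape::kStrided == 64 and InstructionShape::kContiguous == 4 yields
  // 64 * 4 / 32 * 2 == 16 elements per thread.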
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
  /// Length of a shared memory line of crosswise-arranged elements,
  /// in units of AccessType
Index line_size;
/// Internal counter used to determine load addr offset
/// and when to swap higher 64bit with lower 64bit
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator()
: pointer_(nullptr),
stride_(0),
line_size(0),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: pointer_(reinterpret_cast<AccessType const *>(ref.data())),
stride_(ref.stride(0) * Policy::kElementsPerAccess),
line_size((ref.stride(0) * Policy::kContiguousElementsPerLine) /
Policy::kElementsPerAccess),
k_group_idx_(0),
byte_offset_(0) {
int quad = (lane_id / 4);
int lane_in_quad = (lane_id % 4);
int access_contiguous;
if(kOperand == Operand::kA) {
// swizzle id: tid[4]|tid[1:0]|(tid[2]^tid[4])
access_contiguous = ((quad & 0x4) << 1) + ((lane_in_quad) << 1) +
((quad & 0x1) ^ ((quad & 0x4) >> 2));
} else {
      // swizzle id: tid[4]|tid[1:0]|(tid[3]^tid[4])
access_contiguous = ((quad & 0x4) << 1) + (lane_in_quad << 1) +
((quad & 0x2) >> 1 ^ ((quad & 0x4) >> 2));
}
byte_offset_ = access_contiguous *
sizeof(Element) * Policy::kElementsPerAccess;
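    // Worked example (assuming a 16-bit element type): for operand A and
    // lane_id == 6, quad == 1 and lane_in_quad == 2, so access_contiguous ==
    // 0 + 4 + (1 ^ 0) == 5 and byte_offset_ == 5 * 2 * 8 == 80 bytes.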
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
int strided_offset = tile_offset.strided();
k_group_idx_ = 0;
pointer_ += contiguous_offset *
(InstructionShape::kContiguous /
Policy::kContiguousElementsPerLine) *
line_size +
strided_offset * Shape::kStrided / 2;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator++() {
k_group_idx_ = (k_group_idx_ + 1) % 8;
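    // The access offset toggles every four k-groups (when the counter wraps to
    // 0 or reaches 4); the upper/lower 64-bit swap applied in
    // load_with_byte_offset() depends on the same counter.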
if (k_group_idx_ == 4 || k_group_idx_ == 0) {
byte_offset_ ^= 1 * sizeof(Element) * Policy::kElementsPerAccess;
}
pointer_ += line_size;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
  MmaVoltaTensorOpMultiplicandTileIterator &operator--() { assert(0); return *this; }
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsIterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::LdsShape::kContiguous * c * line_size +
Policy::LdsShape::kStrided * s / 2;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr));
// swap higher 64bit and lower 64bit
if (k_group_idx_ & 0x2) {
uint64_t *low = reinterpret_cast<uint64_t *>(&frag) + access_idx * 2;
uint64_t *high = reinterpret_cast<uint64_t *>(&frag) + access_idx * 2 + 1;
uint64_t tmp = *low;
*low = *high;
*high = tmp;
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Policy::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
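// Illustrative usage (a sketch, not part of this header): a warp-level mainloop
// would typically construct this iterator over a shared-memory tile and advance
// it along the k dimension, e.g.
//
//   using Iterator = MmaVoltaTensorOpMultiplicandTileIterator<
//       Shape, Operand::kA, Element, Layout, InstructionShape, 1, 32>;
//   Iterator iter(smem_ref, lane_id);
//   typename Iterator::Fragment frag;
//   iter.load(frag);
//   ++iter;   // advance to the next k-group
//
// where Shape, Element, Layout, InstructionShape, and smem_ref stand in for
// whatever the enclosing warp-level MMA supplies.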
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// KBlock size (in units of elements)
int KBlock>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, KBlock>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kKBlock = KBlock;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kKBlock>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kKBlock>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// KBlock size (in units of elements)
int KBlock>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, KBlock>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kKBlock = KBlock;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kKBlock>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kKBlock>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for 'TN' arrangement
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
  /// Data type of elements
typename Element_,
/// Layout of matrix operand
typename Layout_,
  /// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess = 4;
private:
static int const kInterleavedTileRows = 32;
static int const kInterleavedTileColumns = 32;
static int const kInstructionsPerTile = 2;
/// Rounded up instruction counts
using TileCount = MatrixShape<
Shape::kRow / kInterleavedTileRows,
Shape::kColumn / kInterleavedTileColumns
>;
using FragmentCount = MatrixShape<
TileCount::kRow * kInstructionsPerTile,
TileCount::kColumn * kInstructionsPerTile
>;
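  // For example (sizes assumed for illustration): a 64x64 Shape gives
  // TileCount = 2x2 and FragmentCount = 4x4, so operand A holds
  // 4 * kElementsPerAccess == 16 elements per thread.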
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA ? FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(
TensorRef const &ref,
int lane_id
):
ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad;
int col_idx = 0;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = 0;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad;
int col_idx = 0;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = 0;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
#if defined(__CUDA_ARCH__)
__syncthreads();
#endif
ref_.add_coord_offset(origin_);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(ref_.data());
int ldm = ref_.stride()[0];
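    // Each fragment slot idx corresponds to interleaved tile (idx / 2) and
    // instruction (idx % 2) within that tile; operand A reads a 4-element
    // vector from row tile * 32 + instruction * 4, operand B from the
    // analogous column.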
if (kOperand == Operand::kA) {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kRow; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int row_offset = tile_idx * kInterleavedTileRows + quad_idx * 4;
frag_ptr[idx] = access_ptr[row_offset * ldm / kElementsPerAccess];
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kColumn; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int col_offset = tile_idx * kInterleavedTileColumns + quad_idx * 4;
frag_ptr[idx] = access_ptr[col_offset * ldm / kElementsPerAccess];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
/// Tile iterator specialized for 'NT' arrangement
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
  /// Data type of elements
typename Element_,
/// Layout of matrix operand
typename Layout_,
  /// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess = 4;
private:
static int const kInterleavedTileRows = 32;
static int const kInterleavedTileColumns = 32;
static int const kInstructionsPerTile = 2;
/// Rounded up instruction counts
using TileCount = MatrixShape<
Shape::kRow / kInterleavedTileRows,
Shape::kColumn / kInterleavedTileColumns
>;
using FragmentCount = MatrixShape<
TileCount::kRow * kInstructionsPerTile,
TileCount::kColumn * kInstructionsPerTile
>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA ? FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(
TensorRef const &ref,
int lane_id
):
ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile;
int col_idx = lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = lane_in_quad;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile;
origin_ = MatrixCoord(row_idx, col_idx);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile;
int col_idx = lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = lane_in_quad;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile;
origin_ = MatrixCoord(row_idx, col_idx);
}
#if defined(__CUDA_ARCH__)
__syncthreads();
#endif
ref_.add_coord_offset(origin_);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(ref_.data());
int ldm = ref_.stride()[0];
if (kOperand == Operand::kA) {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kRow; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int row_offset = tile_idx * kInterleavedTileRows;
frag_ptr[idx] = access_ptr[row_offset / kElementsPerAccess + quad_idx];
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kColumn; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int col_offset = tile_idx * kInterleavedTileColumns;
frag_ptr[idx] = access_ptr[col_offset / kElementsPerAccess + quad_idx];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_,
Operand::kA,
Element_,
cutlass::layout::RowMajor,
InstructionShape_,
OpDelta_,
32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> ;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_,
Operand::kA,
Element_,
cutlass::layout::ColumnMajor,
InstructionShape_,
OpDelta_,
32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> ;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_, 32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_>;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OpDelta_, 32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_>;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
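// Taken together, the specializations above route canonical (row-/column-major)
// operands to the two iterators: A-RowMajor and B-ColumnMajor use the 'TN'
// inner iterator, while A-ColumnMajor and B-RowMajor use the 'NT' outer iterator.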
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h",
"repo_id": "include",
"token_count": 35217
} | 45 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/detail/dependent_false.hpp"
#include "cute/numeric/integral_constant.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/util/type_traits.hpp"
#include "cute/container/array.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cute;
enum class BarrierStatus : uint32_t {
WaitAgain = 0u,
WaitDone = 1u,
WaitOnly = 2u
};
class ArrivalToken {
public:
CUTLASS_HOST_DEVICE
ArrivalToken(BarrierStatus barrier_status) : barrier_status_(barrier_status) {}
CUTLASS_HOST_DEVICE
ArrivalToken() = delete;
CUTLASS_HOST_DEVICE
BarrierStatus get() const {
    return barrier_status_;
}
CUTLASS_HOST_DEVICE
bool operator==(ArrivalToken const& other) const {
return barrier_status_ == other.get();
}
private:
BarrierStatus barrier_status_;
CUTLASS_HOST_DEVICE
friend bool operator==(const ArrivalToken& left, const BarrierStatus& right) {
return left.get() == right;
}
CUTLASS_HOST_DEVICE
friend bool operator==(const BarrierStatus& left, const ArrivalToken& right) {
return left == right.get();
}
CUTLASS_HOST_DEVICE
friend bool operator!=(const ArrivalToken& left, const BarrierStatus& right) {
return left.get() != right;
}
CUTLASS_HOST_DEVICE
friend bool operator!=(const BarrierStatus& left, const ArrivalToken& right) {
return left != right.get();
}
};
class ProducerToken : public ArrivalToken {
using ArrivalToken::ArrivalToken;
};
class ConsumerToken : public ArrivalToken {
using ArrivalToken::ArrivalToken;
};
// Circular Buffer Index + Associated Phase
// Supports forward advancement only (operator++, operator+=, advance)
template<uint32_t Stages_>
struct PipelineState {
static constexpr uint32_t Stages = Stages_;
int index_ = 0;
uint32_t phase_ = 0;
uint32_t count_ = 0;
CUTLASS_DEVICE
PipelineState(): index_{}, phase_{}, count_{} {}
CUTLASS_DEVICE
PipelineState(int index, uint32_t phase, uint32_t count)
: index_(index)
, phase_(phase)
, count_(count) {}
CUTLASS_DEVICE
int index() const {
return index_;
}
CUTLASS_DEVICE
uint32_t phase() const {
return phase_;
}
CUTLASS_DEVICE
uint32_t count() const {
return count_;
}
CUTLASS_DEVICE
void operator++() {
if constexpr (Stages > 0) {
++index_;
++count_;
if (index_ == Stages) {
index_ = 0;
phase_ ^= 1;
}
}
}
CUTLASS_DEVICE
PipelineState& operator+=(uint32_t num_iterations) {
return advance(num_iterations);
}
CUTLASS_DEVICE
PipelineState& operator=(PipelineState const& other) {
index_ = other.index();
phase_ = other.phase();
count_ = other.count();
return *this;
}
CUTLASS_DEVICE
PipelineState& advance(uint32_t num_iterations) {
if constexpr (Stages > 0) {
      // Crossing the stage boundary once (fewer iterations than Stages) flips the phase
if ((num_iterations < Stages) && (index_ + num_iterations) >= Stages ) {
phase_ ^= 1;
}
      // When advancing by at least one full round, the phase flips only if the
      // number of stage-boundary crossings is odd
if ((num_iterations >= Stages) && (((index_ + num_iterations) / Stages) % 2) == 1) {
phase_ ^= 1;
}
index_ = (index_ + num_iterations) % Stages;
count_ += num_iterations;
}
return *this;
}
CUTLASS_DEVICE
static PipelineState make_pipeline_state(PipelineState start_state, uint32_t num_iterations) {
return start_state.advance(num_iterations);
}
};
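// Worked example (illustrative): with Stages == 4, a state at {index 2, phase 0}
// advanced by 3 crosses the stage boundary once and becomes {index 1, phase 1};
// advancing the same state by 8 wraps an even number of times, so the phase is
// unchanged and the index returns to 2.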
template<class Pipeline>
CUTLASS_DEVICE
PipelineState<Pipeline::Stages> make_producer_start_state() {
  // Producer starts with the opposite phase, as the buffers are initially empty
constexpr int InitialProducerStage = 0;
constexpr uint32_t InitialProducerPhase = 1;
constexpr uint32_t InitialProducerCount = 0;
return {InitialProducerStage, InitialProducerPhase, InitialProducerCount};
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// TMA load (producer) Async Pipeline class
//
///////////////////////////////////////////////////////////////////////////////////////////////////
// Assumptions : Constructor is visible Cluster-wide (as it needs a Cluster-Sync)
// We have exactly one thread elected in the Producer as the "leader"
// Currently, it is optional to elect a leader for the Consumers
template <int Stages_>
class PipelineTmaAsync {
public :
using FullBarrier = cutlass::arch::ClusterTransactionBarrier;
using EmptyBarrier = cutlass::arch::ClusterBarrier;
using ProducerBarrierType = FullBarrier::ValueType;
using ConsumerBarrierType = EmptyBarrier::ValueType;
static constexpr uint32_t Stages = Stages_;
using PipelineState = cutlass::PipelineState<Stages>;
struct SharedStorage {
FullBarrier full_barrier_[Stages];
EmptyBarrier empty_barrier_[Stages];
};
enum class ThreadCategory {
NonParticipant,
Producer,
Consumer,
ProducerConsumer
};
struct Params {
uint32_t transaction_bytes = 0;
ThreadCategory role = ThreadCategory::NonParticipant;
uint32_t is_leader = 0;
uint32_t num_consumers = 0;
};
// Constructor
template<typename ClusterShape>
CUTLASS_DEVICE
PipelineTmaAsync(SharedStorage& storage, Params params, ClusterShape cluster_shape)
: params_(params)
, full_barrier_ptr_(&storage.full_barrier_[0])
, empty_barrier_ptr_(&storage.empty_barrier_[0]) {
int warp_idx = canonical_warp_idx();
int lane_predicate = cute::elect_one_sync();
if (warp_idx == 0 && lane_predicate == 1) {
// Barrier FULL init
for (int i = 0; i < Stages; ++i) {
full_barrier_ptr_[i].init(1);
}
uint32_t const num_consumer_warpgroups_per_cluster = params_.num_consumers / NumThreadsPerWarpGroup;
uint32_t const multicast_consumer_arrival_count = (cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1) *
num_consumer_warpgroups_per_cluster;
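      // For example, in a 2x2 cluster with two consumer warp groups per block this
      // evaluates to (2 + 2 - 1) * 2 == 6: one arrival from every block sharing a
      // row or column with this block (counting this block once).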
// Barrier EMPTY init
for (int i = 0; i < Stages; ++i) {
empty_barrier_ptr_[i].init(multicast_consumer_arrival_count);
}
}
cutlass::arch::fence_barrier_init();
    // Logic to optimally schedule empty-barrier arrivals
    // Goal: divide the empty-barrier arrival duty equally among the warp group (128 threads)
dim3 block_id = cute::block_id_in_cluster();
auto cluster_size = cute::size(cluster_shape);
static constexpr int MaxClusterSize = 16;
// STEP 1 : Use Cute Layout function to generate an optimal dst block-id (0-15)
if (params_.num_consumers % NumThreadsPerWarpGroup == 0) {
int thread_idx = threadIdx.x % NumThreadsPerWarpGroup;
is_signalling_thread_ = (thread_idx % (NumThreadsPerWarpGroup / MaxClusterSize)) == 0;
auto layout = cute::composition(Swizzle<2,0,-2>{},
Layout<Shape<_4,_4>,Stride<_4,_1>>{});
uint32_t thread_row = warp_idx % 4;
uint32_t thread_col = (thread_idx / 8) % 4;
dst_blockid_ = layout(thread_row, thread_col);
}
else if (params_.num_consumers == 32) {
int thread_idx = threadIdx.x % 32;
is_signalling_thread_ = (thread_idx % (32 / MaxClusterSize)) == 0;
auto layout = Layout<Shape<_4,_4>,Stride<_4, _1>>{};
uint32_t thread_row = thread_idx / 8;
uint32_t thread_col = (thread_idx % 8) / 2;
dst_blockid_ = layout(thread_row, thread_col);
}
else {
is_signalling_thread_ = 0;
#ifndef NDEBUG
asm volatile ("brkpt;\n" ::);
#endif
}
// STEP 2: Find if this dst block-id needs an arrival for this problem
is_signalling_thread_ &= dst_blockid_ < cluster_size;
is_signalling_thread_ &= is_same_row_or_col(dst_blockid_, block_id, cluster_shape);
}
template <typename ClusterShape>
CUTLASS_DEVICE
bool is_same_row_or_col(int dst_block_id, dim3 block_id, ClusterShape cluster_shape) {
return (((dst_block_id % cute::size<0>(cluster_shape)) == block_id.x) ||
(
((dst_block_id / cute::size<0>(cluster_shape)) == block_id.y)
));
}
////////////////////
// Producer APIs
////////////////////
// Four member functions are always used in pairs:
//
// * producer_try_acquire and producer_acquire, and
// * consumer_try_wait and consumer_wait.
//
// The two functions with "try" in their names are called "try" functions,
// and the other two are conceptually "finalize" functions.
// The "try" function in each pair starts the process of waiting on the barrier to flip.
// It opportunistically waits for an implementation-dependent timeout.
// Whether or not the barrier has flipped yet, the try function will return a token.
// If the token indicates that the barrier has not flipped,
// then the token must be passed into the corresponding "finalize" function.
// The finalize function will then block until the barrier has flipped.
// If the token indicates that the barrier _has_ flipped,
// then it is still correct to pass it into the finalize function.
// The finalize function will return immediately in that case.
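  // An illustrative pairing (sketch only, using the member functions below):
  //
  //   ProducerToken token = pipeline.producer_try_acquire(state);
  //   pipeline.producer_acquire(state, token);   // blocks only if still not ready
  //   // ... issue the TMA load against producer_get_barrier(state) ...
  //   ++state;
  //
  // and, on the consumer side:
  //
  //   ConsumerToken token = pipeline.consumer_try_wait(state);
  //   pipeline.consumer_wait(state, token);
  //   // ... read the stage's data ...
  //   pipeline.consumer_release(state);
  //   ++state;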
CUTLASS_DEVICE
ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) {
return producer_try_acquire(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) {
producer_acquire(state.index(), state.phase(), barrier_token);
}
CUTLASS_DEVICE
void producer_commit(PipelineState state, uint32_t bytes) {
producer_commit(state.index(), bytes);
}
// Prevents early exit of producer blocks in Cluster.
// This should be called once before kernel exits.
CUTLASS_DEVICE
void producer_tail(PipelineState state) {
for (int count = 0; count < Stages; ++count) {
producer_acquire(state, {BarrierStatus::WaitOnly});
++state;
}
}
CUTLASS_DEVICE
ProducerBarrierType* producer_get_barrier(PipelineState state) {
return producer_get_barrier(state.index());
}
////////////////////
// Consumer APIs
////////////////////
CUTLASS_DEVICE
ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) {
return consumer_try_wait(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) {
return consumer_test_wait(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
void consumer_wait(PipelineState state) {
consumer_wait(state.index(), state.phase());
}
CUTLASS_DEVICE
void consumer_wait(PipelineState state, ConsumerToken barrier_token) {
consumer_wait(state.index(), state.phase(), barrier_token);
}
CUTLASS_DEVICE
void consumer_release(PipelineState state) {
consumer_release(state.index());
}
private :
uint32_t dst_blockid_ = 0;
uint32_t is_signalling_thread_ = 0;
FullBarrier *full_barrier_ptr_ = nullptr;
EmptyBarrier *empty_barrier_ptr_ = nullptr;
Params params_;
CUTLASS_DEVICE
ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) {
if (barrier_token != BarrierStatus::WaitDone) {
empty_barrier_ptr_[stage].wait(phase);
}
if (barrier_token == BarrierStatus::WaitOnly) {
return;
}
if (params_.is_leader) {
full_barrier_ptr_[stage].arrive_and_expect_tx(params_.transaction_bytes);
}
#ifndef NDEBUG
if (params_.role == ThreadCategory::Consumer || params_.role == ThreadCategory::NonParticipant) {
asm volatile ("brkpt;\n" ::);
}
// Most likely you have elected more than one leader
if (params_.is_leader && (threadIdx.x % 32 != 0)) {
asm volatile ("brkpt;\n" ::);
}
#endif
}
// NOP for TMA based mainloop
CUTLASS_DEVICE
void producer_commit(uint32_t stage, uint32_t bytes) {
// Below code is used only for unit-testing (in the absence of TMA commit)
#if CUTLASS_UNIT_TEST_PIPELINE
if (params_.is_leader) {
// STEP 1 : Commit to self
full_barrier_ptr_[stage].complete_transaction(bytes);
// STEP 2 : Commit to other blocks in our cluster
auto cluster_shape = cute::cluster_shape();
Layout block_layout_in_cluster = make_layout(cluster_shape);
dim3 local_block_id = cute::block_id_in_cluster();
CUTLASS_PRAGMA_UNROLL
for(int n = 0; n < size<1>(block_layout_in_cluster); ++n) {
uint32_t dst_block_id = block_layout_in_cluster(local_block_id.x,n,Int<0>{});
full_barrier_ptr_[stage].complete_transaction(dst_block_id, bytes, n!=local_block_id.y);
}
CUTLASS_PRAGMA_UNROLL
for(int m = 0; m < size<0>(block_layout_in_cluster); ++m) {
uint32_t dst_block_id = block_layout_in_cluster(m,local_block_id.y,Int<0>{});
full_barrier_ptr_[stage].complete_transaction(dst_block_id, bytes, m!=local_block_id.x);
}
}
#endif
}
CUTLASS_DEVICE
ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
// Wait for producer to commit transactions (done by TMA)
CUTLASS_DEVICE
void consumer_wait(uint32_t stage, uint32_t phase) {
full_barrier_ptr_[stage].wait(phase);
}
// Wait for producer to commit transactions (done by TMA)
CUTLASS_DEVICE
void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) {
if (barrier_token == BarrierStatus::WaitAgain) {
full_barrier_ptr_[stage].wait(phase);
}
}
// Consumer signalling Producer of completion
// Ensures all blocks in the same row and column get notified.
CUTLASS_DEVICE
void consumer_release(uint32_t stage, uint32_t skip = false) {
empty_barrier_ptr_[stage].arrive(dst_blockid_, is_signalling_thread_ & (!skip));
#ifndef NDEBUG
if (params_.role == ThreadCategory::Producer || params_.role == ThreadCategory::NonParticipant) {
asm volatile ("brkpt;\n" ::);
}
#endif
}
CUTLASS_DEVICE
ProducerBarrierType* producer_get_barrier(uint32_t stage) {
return reinterpret_cast<ProducerBarrierType*>(&full_barrier_ptr_[stage]);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// TMA store pipeline class
// producer-only class, no async barriers between threads because consumer is TMA unit
//
///////////////////////////////////////////////////////////////////////////////////////////////////
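//
// Usage sketch (illustrative only; "pipeline", "state", and the loop structure are
// assumptions made for this example, not part of this header):
//
//   for (/* each batch of TMA stores */) {
//     pipeline.producer_acquire(state);  // wait until at most UnacquiredStages batches remain in flight
//     /* ... issue the TMA stores for this stage ... */
//     pipeline.producer_commit(state);   // commit the batch just issued
//     ++state;
//   }
//   pipeline.producer_tail(state);       // wait for all committed TMA stores before exiting
//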
template <
int Stages_,
// The number of committed TMA store batches that can be in flight upon return of producer acquire
int UnacquiredStages_ = Stages_-1
>
class PipelineTmaStore {
public:
static constexpr uint32_t Stages = Stages_;
static_assert(Stages_ > 0);
static_assert(UnacquiredStages_ >= 0);
static constexpr uint32_t UnacquiredStages = static_cast<uint32_t>(UnacquiredStages_);
using PipelineState = cutlass::PipelineState<Stages>;
struct Params {
bool always_wait = false;
};
CUTLASS_DEVICE
PipelineTmaStore(Params params = {}) : params_(params) {}
////////////////////
// Producer APIs
////////////////////
// Wait for the least recently committed batch of TMA stores to complete
CUTLASS_DEVICE
void producer_acquire(PipelineState state) {
producer_acquire(state.index(), state.count());
}
// Commit the most recently issued batch of TMA stores
CUTLASS_DEVICE
void producer_commit(PipelineState state) {
producer_commit(state.index(), state.count());
}
// Wait for all TMA stores to complete
CUTLASS_DEVICE
void producer_tail([[maybe_unused]] PipelineState state) {
tma_store_wait<0>();
}
private:
Params params_;
// Wait for the least recently committed batch of TMA stores to complete
// or until at most UnacquiredStages TMA store batches are in-flight (if specified)
CUTLASS_DEVICE
void producer_acquire([[maybe_unused]] uint32_t stage, uint32_t count) {
if (params_.always_wait || count > UnacquiredStages) {
tma_store_wait<UnacquiredStages>();
}
}
// Commit the most recently issued batch of TMA stores
CUTLASS_DEVICE
void producer_commit([[maybe_unused]] uint32_t stage, [[maybe_unused]] uint32_t count) {
tma_store_arrive();
}
};
template <>
class PipelineTmaStore< /* Stages_ = */ 0, /* UnacquiredStages = Stages_ - 1 = */ -1 > {
public:
static constexpr uint32_t Stages = 0;
static constexpr uint32_t UnacquiredStages = 0;
using PipelineState = cutlass::PipelineState<Stages>;
struct Params {
bool always_wait = false;
};
PipelineTmaStore() = default;
CUTLASS_DEVICE
PipelineTmaStore(Params params) : params_(params) {}
////////////////////
// Producer APIs
////////////////////
template<class ThisTemplateParameterExistsOnlyForDependentFalse = int>
CUTLASS_DEVICE
void producer_acquire(PipelineState /* state */,
ThisTemplateParameterExistsOnlyForDependentFalse* /* unused */ = nullptr) {
static_assert(cutlass::detail::dependent_false<ThisTemplateParameterExistsOnlyForDependentFalse>,
"It is never valid to call PipelineTmaStore<0>::producer_acquire");
}
// Commit the most recently issued batch of TMA stores
CUTLASS_DEVICE
void producer_commit(PipelineState state) {
producer_commit(state.index(), state.count());
}
// Wait for all TMA stores to complete
CUTLASS_DEVICE
void producer_tail([[maybe_unused]] PipelineState state) {
tma_store_wait<0>();
}
private:
Params params_;
// Commit the most recently issued batch of TMA stores
CUTLASS_DEVICE
void producer_commit([[maybe_unused]] uint32_t stage, [[maybe_unused]] uint32_t count) {
tma_store_arrive();
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Simple producer-consumer async Pipeline class using producer transaction barriers
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int Stages_>
class PipelineTransactionAsync {
public :
using FullBarrier = cutlass::arch::ClusterTransactionBarrier;
using EmptyBarrier = cutlass::arch::ClusterBarrier;
using ProducerBarrierType = FullBarrier::ValueType;
using ConsumerBarrierType = EmptyBarrier::ValueType;
static constexpr uint32_t Stages = Stages_;
using PipelineState = cutlass::PipelineState<Stages>;
struct SharedStorage {
cute::array<FullBarrier, Stages> full_barrier_;
cute::array<EmptyBarrier, Stages> empty_barrier_;
};
enum class ThreadCategory {
NonParticipant,
Producer,
Consumer,
ProducerConsumer
};
struct Params {
ThreadCategory role = ThreadCategory::NonParticipant;
uint32_t transaction_bytes = 0;
uint32_t producer_arv_count = 1;
uint32_t consumer_arv_count = 1;
uint32_t dst_blockid = cute::block_rank_in_cluster();
};
// Constructor
CUTLASS_DEVICE
PipelineTransactionAsync(SharedStorage& storage, Params const& params)
: params_(params)
, full_barrier_ptr_(storage.full_barrier_.data())
, empty_barrier_ptr_(storage.empty_barrier_.data()) {
int warp_idx = canonical_warp_idx();
int lane_predicate = cute::elect_one_sync();
// Barrier FULL, EMPTY init
// Init is done only by thread 0 of the block
if (warp_idx == 0 && lane_predicate == 1) {
for (int i = 0; i < Stages; ++i) {
full_barrier_ptr_[i].init(params.producer_arv_count);
empty_barrier_ptr_[i].init(params.consumer_arv_count);
}
}
cutlass::arch::fence_barrier_init();
}
////////////////////
// Producer APIs
////////////////////
// Four member functions are always used in pairs:
//
// * producer_try_acquire and producer_acquire, and
// * consumer_try_wait and consumer_wait.
//
// The two functions with "try" in their names are called "try" functions,
// and the other two are conceptually "finalize" functions.
// The "try" function in each pair starts the process of waiting on the barrier to flip.
// It opportunistically waits for an implementation-dependent timeout.
// Whether or not the barrier has flipped yet, the try function will return a token.
// If the token indicates that the barrier has not flipped,
// then the token must be passed into the corresponding "finalize" function.
// The finalize function will then block until the barrier has flipped.
// If the token indicates that the barrier _has_ flipped,
// then it is still correct to pass it into the finalize function.
// The finalize function will return immediately in that case.
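//
// In this class the producer also announces how many bytes it will write to the
// stage. A sketch of one producer iteration (illustrative only; "pipeline" and
// "state" are assumed names):
//
//   pipeline.producer_acquire(state);             // wait for the stage to become empty
//   pipeline.producer_expect_transaction(state);  // expect-tx, issued by exactly one thread
//   /* ... write transaction_bytes of data into the stage's buffer ... */
//   pipeline.producer_commit(state);              // arrive on the stage's full barrier
//   ++state;
//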
CUTLASS_DEVICE
ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) {
return producer_try_acquire(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) {
producer_acquire(state.index(), state.phase(), barrier_token);
}
// Perform an expect-tx operation on the stage's full barrier. Must be called by 1 thread
CUTLASS_DEVICE
void producer_expect_transaction(PipelineState state) {
producer_expect_transaction(state.index());
}
CUTLASS_DEVICE
void producer_commit(PipelineState state) {
producer_commit(state.index());
}
// Prevents early exit of producer blocks in Cluster.
// This should be called once before kernel exits.
CUTLASS_DEVICE
void producer_tail(PipelineState state) {
for (int count = 0; count < Stages; ++count) {
producer_acquire(state);
++state;
}
}
CUTLASS_DEVICE
ProducerBarrierType* producer_get_barrier(PipelineState state) {
return producer_get_barrier(state.index());
}
////////////////////
// Consumer APIs
////////////////////
CUTLASS_DEVICE
ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) {
return consumer_try_wait(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) {
return consumer_test_wait(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
void consumer_wait(PipelineState state, ConsumerToken barrier_token = {BarrierStatus::WaitAgain}) {
consumer_wait(state.index(), state.phase(), barrier_token);
}
CUTLASS_DEVICE
void consumer_release(PipelineState state) {
consumer_release(state.index());
}
private:
FullBarrier *full_barrier_ptr_ = nullptr;
EmptyBarrier *empty_barrier_ptr_ = nullptr;
Params params_;
CUTLASS_DEVICE
ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) {
if (barrier_token == BarrierStatus::WaitAgain) {
empty_barrier_ptr_[stage].wait(phase);
}
}
// Perform an expect-tx operation on the stage's full barrier. Must be called by 1 thread
CUTLASS_DEVICE
void producer_expect_transaction(uint32_t stage) {
full_barrier_ptr_[stage].expect_transaction(params_.transaction_bytes);
}
CUTLASS_DEVICE
void producer_commit(uint32_t stage) {
full_barrier_ptr_[stage].arrive(params_.dst_blockid);
}
CUTLASS_DEVICE
ProducerBarrierType* producer_get_barrier(uint32_t stage) {
return reinterpret_cast<ProducerBarrierType*>(&full_barrier_ptr_[stage]);
}
CUTLASS_DEVICE
ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) {
if (barrier_token == BarrierStatus::WaitAgain) {
full_barrier_ptr_[stage].wait(phase);
}
}
CUTLASS_DEVICE
void consumer_release(uint32_t stage, uint32_t skip = false) {
empty_barrier_ptr_[stage].arrive(params_.dst_blockid, (not skip));
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Simple producer-consumer async Pipeline class
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int Stages_>
class PipelineAsync {
public :
using FullBarrier = cutlass::arch::ClusterBarrier;
using EmptyBarrier = cutlass::arch::ClusterBarrier;
using ProducerBarrierType = FullBarrier::ValueType;
using ConsumerBarrierType = EmptyBarrier::ValueType;
static constexpr uint32_t Stages = Stages_;
using PipelineState = cutlass::PipelineState<Stages>;
struct SharedStorage {
FullBarrier full_barrier_[Stages];
EmptyBarrier empty_barrier_[Stages];
};
enum class ThreadCategory {
NonParticipant,
Producer,
Consumer,
ProducerConsumer
};
struct Params {
ThreadCategory role = ThreadCategory::NonParticipant;
uint32_t producer_arv_count = 1;
uint32_t consumer_arv_count = 1;
uint32_t dst_blockid = cute::block_rank_in_cluster();
};
// Default assumption when only storage is passed:
// a single producer and a single consumer, both in the same block (within the Cluster)
CUTLASS_DEVICE
PipelineAsync(SharedStorage& storage)
: PipelineAsync(storage, {}) {}
CUTLASS_DEVICE
PipelineAsync(
SharedStorage& storage,
Params const& params) :
params_(params),
full_barrier_ptr_(&storage.full_barrier_[0]),
empty_barrier_ptr_(&storage.empty_barrier_[0]) {
int warp_idx = canonical_warp_idx();
int lane_predicate = cute::elect_one_sync();
// Barrier FULL, EMPTY init
// Init is done only by thread 0 of the block
if (warp_idx == 0 && lane_predicate == 1) {
for (int i = 0; i < Stages; ++i) {
full_barrier_ptr_[i].init(params.producer_arv_count);
empty_barrier_ptr_[i].init(params.consumer_arv_count);
}
}
cutlass::arch::fence_barrier_init();
}
////////////////////
// Producer APIs
////////////////////
// Four member functions are always used in pairs:
//
// * producer_try_acquire and producer_acquire, and
// * consumer_try_wait and consumer_wait.
//
// The two functions with "try" in their names are called "try" functions,
// and the other two are conceptually "finalize" functions.
// The "try" function in each pair starts the process of waiting on the barrier to flip.
// It opportunistically waits for an implementation-dependent timeout.
// Whether or not the barrier has flipped yet, the try function will return a token.
// If the token indicates that the barrier has not flipped,
// then the token must be passed into the corresponding "finalize" function.
// The finalize function will then block until the barrier has flipped.
// If the token indicates that the barrier _has_ flipped,
// then it is still correct to pass it into the finalize function.
// The finalize function will return immediately in that case.
CUTLASS_DEVICE
ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) {
return producer_try_acquire(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) {
producer_acquire(state.index(), state.phase(), barrier_token);
}
CUTLASS_DEVICE
void producer_commit(PipelineState state) {
producer_commit(state.index());
}
template<class UserDefinedArriveOp>
CUTLASS_DEVICE
void producer_commit(PipelineState state, UserDefinedArriveOp&& user_defined_arrive_op) {
cute::forward<UserDefinedArriveOp>(user_defined_arrive_op)(producer_get_barrier(state.index()));
producer_commit(state);
}
// Prevents early exit of producer blocks in Cluster.
// This should be called once before kernel exits.
CUTLASS_DEVICE
void producer_tail(PipelineState state) {
for (int count = 0; count < Stages; ++count) {
producer_acquire(state);
++state;
}
}
CUTLASS_DEVICE
ProducerBarrierType* producer_get_barrier(PipelineState state) {
return producer_get_barrier(state.index());
}
////////////////////
// Consumer APIs
////////////////////
CUTLASS_DEVICE
ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) {
return consumer_try_wait(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) {
return consumer_test_wait(state.index(), state.phase(), skip_wait);
}
CUTLASS_DEVICE
void consumer_wait(PipelineState state, ConsumerToken barrier_token = {BarrierStatus::WaitAgain}) {
consumer_wait(state.index(), state.phase(), barrier_token);
}
CUTLASS_DEVICE
void consumer_release(PipelineState state) {
consumer_release(state.index());
}
private:
Params params_;
FullBarrier *full_barrier_ptr_;
EmptyBarrier *empty_barrier_ptr_;
CUTLASS_DEVICE
ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) {
if (barrier_token == BarrierStatus::WaitAgain) {
empty_barrier_ptr_[stage].wait(phase);
}
}
CUTLASS_DEVICE
void producer_commit(uint32_t stage) {
full_barrier_ptr_[stage].arrive();
}
CUTLASS_DEVICE
ProducerBarrierType* producer_get_barrier(uint32_t stage) {
return reinterpret_cast<ProducerBarrierType*>(&full_barrier_ptr_[stage]);
}
CUTLASS_DEVICE
ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) {
if (skip_wait) {
return {BarrierStatus::WaitDone};
}
uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase);
return {static_cast<BarrierStatus>(barrier_status)};
}
CUTLASS_DEVICE
void consumer_wait(uint32_t stage, uint32_t phase) {
uint32_t done = full_barrier_ptr_[stage].test_wait(phase);
if (!done) {
full_barrier_ptr_[stage].wait(phase);
}
}
CUTLASS_DEVICE
void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) {
if (barrier_token == BarrierStatus::WaitAgain) {
full_barrier_ptr_[stage].wait(phase);
}
}
CUTLASS_DEVICE
void consumer_release(uint32_t stage) {
empty_barrier_ptr_[stage].arrive(params_.dst_blockid);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Barrier to ensure an Ordered Sequence between
// SequenceLength number of groups (each with group_size participants) executing SequenceDepth Stages
// i.e., for all i < j, the wait() for group "j" at a particular stage "m"
// succeeds only after group "i" has arrived at that stage
//
///////////////////////////////////////////////////////////////////////////////////////////////////
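//
// Usage sketch (illustrative only; "order_barrier" is an assumed name). At every stage,
// each of the SequenceLength groups executes:
//
//   order_barrier.wait();    // blocks until the preceding group has arrived at this stage
//   /* ... perform this group's work for the stage ... */
//   order_barrier.arrive();  // unblock the next group and advance to the next stage
//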
template<int SequenceDepth, int SequenceLength>
class OrderedSequenceBarrier {
public :
using Barrier = cutlass::arch::ClusterBarrier;
struct SharedStorage {
Barrier barrier_[SequenceDepth][SequenceLength];
};
struct Params {
uint32_t group_id;
uint32_t group_size;
};
private :
// In the future, this Params object can easily be replaced with a CG object
Params params_;
Barrier *barrier_ptr_;
PipelineState<SequenceDepth> stage_;
static constexpr int Depth = SequenceDepth;
static constexpr int Length = SequenceLength;
public:
OrderedSequenceBarrier() = delete;
OrderedSequenceBarrier(const OrderedSequenceBarrier&) = delete;
OrderedSequenceBarrier(OrderedSequenceBarrier&&) = delete;
OrderedSequenceBarrier& operator=(const OrderedSequenceBarrier&) = delete;
OrderedSequenceBarrier& operator=(OrderedSequenceBarrier&&) = delete;
~OrderedSequenceBarrier() = default;
CUTLASS_DEVICE
OrderedSequenceBarrier(SharedStorage& storage, Params const& params) :
params_(params),
barrier_ptr_(&storage.barrier_[0][0]),
// Group 0 - starts with an opposite phase
stage_({0, params.group_id == 0, 0}) {
int warp_idx = canonical_warp_idx();
int lane_predicate = cute::elect_one_sync();
// Barrier FULL, EMPTY init
// Init is done only by the one elected thread of the block
if (warp_idx == 0 && lane_predicate == 1) {
for (int d = 0; d < Depth; ++d) {
for (int l = 0; l < Length; ++l) {
barrier_ptr_[d * Length + l].init(params.group_size);
}
}
}
cutlass::arch::fence_barrier_init();
}
// Wait on a stage to be unlocked
CUTLASS_DEVICE
void wait() {
get_barrier_for_current_stage(params_.group_id).wait(stage_.phase());
}
// Signal completion of Stage and move to the next stage
// (group_id) signals to (group_id+1)
CUTLASS_DEVICE
void arrive() {
int signalling_id = (params_.group_id + 1) % Length;
get_barrier_for_current_stage(signalling_id).arrive();
++stage_;
}
CUTLASS_DEVICE
void advance() {
++stage_;
}
private:
CUTLASS_DEVICE
Barrier& get_barrier_for_current_stage(int group_id) {
return barrier_ptr_[stage_.index() * Length + group_id];
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cutlass
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Ell iterator for matrix of indices (ellColInd matrix)
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
namespace cutlass {
namespace transform {
namespace threadblock {
namespace ell{
constexpr unsigned int SmemPow = 8;
constexpr unsigned int SmemStages = 2;
constexpr unsigned int SmemSize = 1 << SmemPow;
constexpr unsigned int SmemMask = (SmemSize*SmemStages-1);
class SharedStorage{
public:
Array<int, SmemSize*SmemStages> array;
};
class Iterator{
public:
using Layout = layout::PitchLinear;
using LongIndex = typename Layout::LongIndex;
private:
const int *gmem_col_idx_;
int *smem_col_idx_;
const int block_size_;
const int base_idx_;
const int k_shape_;
const int ell_increment_;
const int array_length_;
int col_idx_base_;
int residue_;
int counter_;
int pow2_;
int residue_shape_;
int smem_offset_;
int smem_stage_;
int gmem_offset_;
int lane_;
bool is_pow2_;
bool is_residue_tile_;
public:
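// Loads the next SmemSize ELL column indices from global memory into the current
// shared-memory stage (double buffered). Reads past the end of the index array are
// clamped to its last entry, and indices smaller than base_idx_ are stored as -1.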
CUTLASS_DEVICE
void load_ell_indices(){
for(int i=threadIdx.x; i<SmemSize; i+=blockDim.x){
int idx = (gmem_offset_+i < array_length_) ? gmem_offset_+i : array_length_-1;
int gmem_col_idx = gmem_col_idx_[idx] - base_idx_;
smem_col_idx_[i + smem_stage_ * SmemSize] =
(gmem_col_idx >= 0) ? gmem_col_idx : -1;
}
gmem_offset_ += SmemSize;
smem_stage_ ^= 1;
}
CUTLASS_DEVICE
Iterator(
SharedStorage& shared_storage_base,
const int* col_idx,
const int& block_size,
const int& base_idx,
const int k_shape,
const int& problem_size_k,
const int& ell_stride,
const int& thread_idx)
: residue_(0),
counter_(0),
smem_offset_(0),
smem_stage_(0),
gmem_offset_(0),
block_size_(block_size),
base_idx_(base_idx),
k_shape_(k_shape),
ell_increment_(ell_stride * block_size),
array_length_((problem_size_k + block_size_ - 1) / block_size_),
residue_shape_(problem_size_k % k_shape_),
is_residue_tile_(residue_shape_ != 0),
smem_col_idx_(reinterpret_cast<int*>(&shared_storage_base.array)),
gmem_col_idx_(const_cast<int*>(col_idx)),
lane_(thread_idx % 32) {
load_ell_indices();
__syncthreads();
is_pow2_ = ((block_size_ & (block_size_ - 1)) == 0);
if( is_pow2_ && k_shape <= block_size_ ) lane_ = 0;
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_;
pow2_ = 0;
while(block_size_ >> (pow2_ + 1)) ++pow2_;
}
CUTLASS_DEVICE
int get_blocksize(){
return block_size_;
}
CUTLASS_DEVICE
Iterator &operator++(){
if(is_residue_tile_){
residue_ += residue_shape_;
is_residue_tile_ = false;
} else {
residue_ += k_shape_;
}
if(residue_ < block_size_){
return *this;
}
if((array_length_ > SmemSize) && (((smem_offset_ >> SmemPow) & 1) != smem_stage_))
load_ell_indices();
if(residue_ == block_size_){
++smem_offset_;
counter_ += ell_increment_;
residue_ = 0;
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_;
return *this;
}
if(is_pow2_){
smem_offset_ += residue_ >> pow2_;
counter_ += (residue_ >> pow2_) * ell_increment_;
residue_ = residue_ & ((1 << pow2_) - 1);
}
else {
smem_offset_ += residue_ / block_size_;
counter_ += (residue_ / block_size_) * ell_increment_;
residue_ %= block_size_;
}
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_;
return *this;
}
CUTLASS_DEVICE
LongIndex get_offset(const int& idx) {
int num_jump_tiles;
if(is_pow2_)
num_jump_tiles = (idx + residue_) >> pow2_;
else
num_jump_tiles = (idx + residue_) / block_size_;
int tmp = __shfl_sync(0xffffffff, col_idx_base_, num_jump_tiles);
return tmp - num_jump_tiles * ell_increment_;
}
CUTLASS_DEVICE
LongIndex get_offset_fast() {
return col_idx_base_;
}
};
} // namespace ell
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap,
bool Dynamic_iterations = false,
int Alignment =
sizeof_bits<Element>::value* ThreadMap::kElementsPerAccess / 8
>
class RegularTileAccessIteratorDirectConv;
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations OFF
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, false, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
//Do nothing
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * ThreadMap::Iterations::kStrided *
ThreadMap::Delta::kStrided * stride_ * ThreadMap::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations ON
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_,true, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
/// Total iterattions in the strided dimension: Dynamic value
int total_iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
total_iteration_strided_ = num;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < total_iteration_strided_) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * total_iteration_strided_ * ThreadMap::Delta::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_,bool Dynamic_iterations, int Alignment >
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::ColumnMajor,
AdvanceRank, ThreadMap_, Dynamic_iterations , Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIteratorDirectConv<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_,
Dynamic_iterations>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
iterator_.set_iteration_num(num);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_,bool Dynamic_iterations, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Dynamic_iterations, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIteratorDirectConv<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_,
Dynamic_iterations>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
iterator_.set_iteration_num(num);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Code Organization")
[README](../../README.md#documentation) > **Code Organization**
# CUTLASS Code Organization
This document describes the layout of the CUTLASS repository. The main components are:
* **CUTLASS Template Library** - CUDA Templates for Linear Algebra Subroutines and Solvers (header only)
* **CuTe Template Library** - CUTLASS's core vocabulary layout type and associated algebra (header only)
* **CUTLASS Utilities** - Additional templates
* **CUTLASS Instance Library** - instantiations of CUTLASS templates covering the design space
* **CUTLASS Profiler** - CUTLASS Library, Profiler, and Utilities
* **Examples** - SDK examples of CUTLASS Template Library and components
* **Media** - supporting documentation and media content
* **Tests** - test components for CUTLASS Template Library and tools
## CUTLASS Template Library
CUDA Templates for Linear Algebra Subroutines and Solvers is a library of CUDA C++ template classes for
performing efficient matrix computations on NVIDIA GPUs.
Like NVIDIA CUB, the components of CUTLASS are organized hierarchically based on the scope of cooperative
elements. For example, warp-level GEMM components perform a matrix multiply collectively by the
set of threads within a warp. The following figure illustrates each layer.
Components are designed to be usable by client applications accessing functionality at each scope.
CUTLASS Templates are implemented by header files in the following directory structure:
```
include/ # Top-level include directory. Client applications should target this path.
cutlass/ # CUDA Templates for Linear Algebra Subroutines and Solvers - headers only
arch/ # direct exposure of architecture features (including instruction-level GEMMs)
*
gemm/ # code specialized for general matrix product computations
thread/ # thread-level operators
warp/ # warp-level operators
collective/ # 3.x API operators for all threads a tiled mma/copy are built over
threadblock/ # CTA-level operators
kernel/ # CUDA kernel entry points
device/ # launches kernel(s) over a full device
* # scope-agnostic components and basic vocabulary type definitions for GEMM
layout/ # layout definitions for matrices, tensors, and other mathematical objects in memory
*
reduction/ # bandwidth-limited reduction kernels that do not fit the "gemm" models
thread/ # thread-level operators
warp/ # warp-level operators
threadblock/ # CTA-level operators
kernel/ # CUDA kernel entry points
device/ # launches kernel(s) over a full device
* # scope-agnostic components and basic vocabulary type definitions
transform/ # code specialized for layout, type, and domain transformations
thread/ # thread-level operators
warp/ # warp-level operators
threadblock/ # CTA-level operators
kernel/ # CUDA kernel entry points
device/ # launches kernel(s) over a full device
* # scope-agnostic components and basic vocabulary type definitions
util/ # miscellaneous CUTLASS components
*
* # core vocabulary types and fundamental arithmetic operators
cute / # CuTe Layout, layout algebra, MMA/Copy atoms, tiled MMA/Copy
algorithm/ # Definitions of core operations such as copy, gemm, and operations on cute::tuples
arch/ # Bare bones PTX wrapper structs for copy and math instructions
atom/ # Meta-information either link to or built from arch/ operators
mma_atom.hpp # cute::Mma_Atom and cute::TiledMma
copy_atom.hpp # cute::Copy_Atom and cute::TiledCopy
*sm*.hpp # Arch specific meta-information for copy and math operations
container/ # Core container types used across CuTe, namely, cute::tuple
numeric/ # CuTe's internal numerics implementation
* # Core library types such as Shape, Stride, Layout, Tensor, and associated operations
```
See [Programming Guidelines](/media/docs/programming_guidelines.md) for further details about
conventions and design patterns used throughout CUTLASS.
## CuTe
CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly package the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. This lets programmers focus on the logical descriptions of their algorithms while CuTe does the mechanical bookkeeping for them. With these tools, we can quickly design, implement, and modify all dense linear algebra operations. More documentation
for CuTe can be found in [`/media/docs/cute/`](/media/docs/cute/).
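As a brief illustration of this vocabulary, the following sketch builds a `Layout` and views an array through it as a `Tensor` (a minimal sketch; the shape, strides, and function name are chosen only for this example):

```c++
#include <cute/tensor.hpp>

void cute_tensor_sketch() {
  using namespace cute;

  float data[32];

  // A 4x8 column-major layout: shape (4,8) with strides (1,4)
  auto layout = make_layout(make_shape(Int<4>{}, Int<8>{}),
                            make_stride(Int<1>{}, Int<4>{}));

  // Bind the pointer to the layout to form a non-owning Tensor view
  auto tensor = make_tensor(&data[0], layout);

  // tensor(i, j) indexes data[i + 4*j]; CuTe performs the index arithmetic
  tensor(2, 3) = 1.0f;
}
```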
## Tools
The `tools/` directory contains clients of the CUTLASS Template library and includes the following.
### CUTLASS Instance Library
The CUTLASS Instance Library contains instantiations of the above CUTLASS templates covering supported configurations,
data types, block structure, and tile sizes. These instantiations are procedurally generated using a set of
scripts to span the design space.
```
tools/
library/ # static/dynamic library containing all kernel instantiations of interest
# (with some build-level filter switches to compile specific subsets)
include/
cutlass/
library/ # header files for CUTLASS Deliverables Library (in cutlass::library:: namespace)
handle.h # implements a host-side API for launching kernels, similar to cuBLAS
library.h # defines enums and structs to describe the tiled structure of operator instances
manifest.h # collection of all instances
src/
python/
cutlass_library/ # scripts to procedurally generate CUTLASS template instances
gemm_operations.py
library.py
generator.py # entry point of procedural generation scripts - invoked by cmake
manifest.py
```
When CMake is executed, the CUTLASS Instance Library generator scripts are executed to construct a set of
instantiations in `build/tools/library/generated/`.
### CUTLASS Profiler
The CUTLASS Profiler is designed to load the CUTLASS Instance Library and execute all operations contained therein.
This command-line driven application constructs an execution environment for evaluating functionality and performance.
It is implemented in
```
tools/
profiler/
```
and may be built as follows.
```
$ make cutlass_profiler -j
```
[Further details about the CUTLASS Profiler are described here.](/media/docs/profiler.md)
### CUTLASS Utilities
`tools/util/` defines a companion library of headers and sources that support the CUTLASS test programs, examples, and other client applications. Its structure is as follows:
```
tools/
util/
include/
cutlass/
util/ # CUTLASS Utility companion library
reference/ # functional reference implementation of CUTLASS operators
# (minimal consideration for performance)
detail/
*
device/ # device-side reference implementations of CUTLASS operators
thread/
kernel/
*
host/ # host-side reference implementations of CUTLASS operators
*
*
```
[More details about CUTLASS Utilities may be found here.](/media/docs/utilities.md)
## Examples
To demonstrate CUTLASS components, several SDK examples are implemented in `examples/`.
CUTLASS SDK examples apply CUTLASS templates to implement basic computations.
```
examples/
00_basic_gemm/ # launches a basic GEMM with single precision inputs and outputs
01_cutlass_utilities/ # demonstrates CUTLASS Utilities for allocating and initializing tensors
02_dump_reg_smem/ # debugging utilities for printing register and shared memory contents
03_visualize_layout/ # utility for visualizing all layout functions in CUTLASS
04_tile_iterator/ # example demonstrating an iterator over tiles in memory
05_batched_gemm/ # example demonstrating CUTLASS's batched strided GEMM operation
  06_splitK_gemm/             # example demonstrating CUTLASS's Split-K parallel reduction kernel
07_volta_tensorop_gemm/ # example demonstrating mixed precision GEMM using Volta Tensor Cores
08_turing_tensorop_gemm/ # example demonstrating integer GEMM using Turing Tensor Cores
10_planar_complex/ # example demonstrating planar complex GEMM kernels
11_planar_complex_array/ # example demonstrating planar complex kernels with batch-specific problem sizes
12_gemm_bias_relu/ # example demonstrating GEMM fused with bias and relu activation function
13_fused_two_gemms/ # example demonstrating two GEMMs fused into one kernel
```
## Media
This directory contains documentation, images, and performance result data which accompanies the CUTLASS library and components.
## Tests
Test programs for CUTLASS. Tests are organized hierarchically, mirroring the organization of source files.
```
test/ # unit tests for CUTLASS Template Library
unit/
arch/
core/
gemm/
device/
kernel/
thread/
threadblock/
warp/
reduction/
kernel/
thread/
transform/
threadblock/
*
```
Tests can be built and run at the top level scope by invoking `make test_unit` or by building
and explicitly executing each individual target, e.g. `cutlass_test_unit_gemm_device`.
Tests are configured with appropriate GTest filter strings so that they run only on
architectures where they are expected to pass. Thus, no tests should fail. The actual number
of tests run may vary over time as more are added.
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS GEMM API")
[README](../../README.md#documentation) > **CUTLASS GEMM API**
# CUTLASS GEMM API
CUTLASS presents a uniform programming model for matrix multiply-accumulate operations at each level of the hierarchy. This document
focuses on device-level, threadblock-level, warp-level, thread-level, and instruction-level GEMMs.
# CUTLASS GEMM Model
CUTLASS implements the basic GEMM triple loop nest with a tiled structure mirroring the execution model hierarchy.
The following pseudocode describes the model for a GEMM kernel targeting a warp-synchronous matrix multiply instruction like
mma.sync. The entire operation is referred to as "Gemm," as it is assumed that an epilogue operation performs the general matrix
update similar to BLAS.
```c++
// cutlass::gemm::device::Gemm
//
for (int cta_n = 0; cta_n < GemmN; cta_n += CtaTileN) { // for each CTA } CTA-level concurrency
for (int cta_m = 0; cta_m < GemmM; cta_m += CtaTileM) { // for each CTA }
//
// cutlass::gemm::threadblock::Mma
//
for (int cta_k = 0; cta_k < GemmK; cta_k += CtaTileK) { // "GEMM mainloop" - no unrolling - one iteration of this loop is one "stage"
//
for (int warp_n = 0; warp_n < CtaTileN; warp_n += WarpTileN) { // for each warp } warp-level concurrency
for (int warp_m = 0; warp_m < CtaTileM; warp_m += WarpTileM) { // for each warp }
//
for (int warp_k = 0; warp_k < CtaTileK; warp_k += WarpTileK) { // fully unroll across CtaTileK - one iteration of this loop is one "k Group"
//
for (int mma_k = 0; mma_k < WarpTileK; mma_k += MmaK) { // cutlass::gemm::warp::Mma
for (int mma_n = 0; mma_n < WarpTileN; mma_n += MmaN) { //
for (int mma_m = 0; mma_m < WarpTileM; mma_m += MmaM) { //
//
mma_instruction(d, a, b, c); // cutlass::arch::mma - warp-wide matrix multiply instruction
} // for mma_m
} // for mma_n
} // for mma_k
} // for warp_k
} // for warp_m
} // for warp_n
} // for cta_k
} // for cta_m
} // for cta_n
```
The outer-most loops correspond to CTA-level hardware concurrency and are not explicitly written as loops in the code. These
are implied by CUDA grid launch semantics.
The comment `cutlass::gemm::threadblock::Mma` refers to the threadblock-scoped matrix multiply-accumulate concept. This is
the computation performed by one threadblock to compute a matrix product held in registers; it contains the "GEMM mainloop" shown in the pseudocode above.
The comment `cutlass::gemm::warp::Mma` refers to the computation performed by each warp. This is a nested loop executing a
sequence of accumulated outer products.
The inner-most operation corresponds directly to hardware support. In this example, the nested structure terminates with
warp-synchronous matrix multiply instructions targeting Tensor Cores.
Alternatively, GEMMs targeting single-thread instructions may have an additional series of nested loops corresponding to
thread-level concurrency.
# CUTLASS GEMM Components
This loop nest is expressed in CUTLASS via the following components which are specialized for data type, layout, and
math instruction.
![ALT](/media/images/cutlass-gemm-components.png "CUTLASS GEMM Components")
These components are described in the following sections.
## Device-wide GEMM API
The device-level GEMM API is intended to streamline instantiation and execution of the standard
GEMM computation across the GPU. This operator is intended to be used in host-side .cu code and
has semantics similar to cuBLAS.
The device-wide GEMM API is embodied by the following operators:
- [cutlass::gemm::device::Gemm](/include/cutlass/gemm/device/gemm.h) - basic GEMM operation
- [cutlass::gemm::device::GemmArray](/include/cutlass/gemm/device/gemm_array.h) - batched GEMM operation in which input matrices are read from arrays of pointers
- [cutlass::gemm::device::GemmBatched](/include/cutlass/gemm/device/gemm_batched.h) - batched GEMM operation in which input matrices are separated by a constant stride
- [cutlass::gemm::device::GemmSplitKParallel](/include/cutlass/gemm/device/gemm_splitk_parallel.h) - GEMM operation that partitions the GEMM K dimension then launches a separate reduction kernel
**Example:** launch a mixed-precision GEMM targeting Volta Tensor Cores.
```c++
using Gemm = cutlass::gemm::device::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
float, // ElementAccumulator
cutlass::arch::OpClassTensorOp, // tag indicating Tensor Cores
cutlass::arch::Sm70 // tag indicating target GPU compute architecture
>;
Gemm gemm_op;
cutlass::Status status;
//
// Launch GEMM on the device
//
status = gemm_op({
{m, n, k},
{ptrA, lda},
{ptrB, ldb},
{ptrC, ldc},
{ptrD, ldd},
{alpha, beta}
});
if (status != cutlass::Status::kSuccess) {
return -1;
}
```
## Threadblock-level GEMM API
GEMMs at this scope are expected to efficiently load tiles of data from global memory into internal storage and then compute matrix
products with warp-level GEMM operators.
The threadblock-scoped matrix multiply operation is embodied by
[cutlass::gemm::threadblock::MmaPipelined](/include/cutlass/gemm/threadblock/mma_pipelined.h).
This is a class inspired by [std::transform_reduce()](https://en.cppreference.com/w/cpp/algorithm/transform_reduce)
which computes the accumulated matrix product of a range of tiles defined by tile iterators.
![ALT](/media/images/cutlass-threadblock-mma-pipelined.png "cutlass::gemm::threadblock::MmaPipelined")
In the case of GEMM, the tile iterators are
[cutlass::transform::threadblock::PredicatedTileIterator](/include/cutlass/transform/threadblock/predicated_tile_iterator.h)
to traverse a sequence of tiles in global memory with appropriate predication to avoid out-of-bounds
memory accesses.
*Concept.* Threadblock-level matrix multiply accumulate operators are function objects satisfying the following concept.
```c++
struct Mma {
  /// Shape of the threadblock-scoped matrix operation (concept: GemmShape)
struct Shape;
/// Data type of multiplicand A (concept: numeric type)
struct ElementA;
/// Layout of multiplicand A (concept: Layout)
struct LayoutA;
/// Data type of multiplicand B (concept: numeric type)
struct ElementB;
/// Layout of multiplicand B (concept: Layout)
struct LayoutB;
/// Data type of accumulator matrix C (concept: numeric type)
struct ElementC;
/// Layout of accumulator matrix C (concept: Layout)
struct LayoutC;
/// Iterator of A operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept
struct IteratorA;
/// Fragment object loaded from IteratorA (concept: Array<ElementA, ..>)
struct FragmentA;
/// Iterator of B operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept
struct IteratorB;
/// Fragment object loaded from IteratorB (concept: Array<ElementB, ..>)
struct FragmentB;
/// Iterator of C operand in shared memory -
/// satisfies: ReadableRandomAccessTileIteratorConcept | WriteableRandomAccessTileIteratorConcept
struct IteratorC;
/// Fragment object loaded from IteratorC (concept: Array<ElementC, ..>)
struct FragmentC;
/// Warp-level matrix multiply operator (concept: satisfies gemm::warp::Mma)
struct Operator;
//
// Method
//
/// Computes a matrix product accumulated in D
CUTLASS_DEVICE
void operator()(
FragmentC &D,
IteratorA iter_A,
IteratorB iter_B,
FragmentC const &C);
};
```
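To make the concept concrete, the following is a minimal sketch of how a type satisfying it might be invoked from device code. The names here are illustrative, and concrete implementations such as `MmaPipelined` take additional constructor and call arguments (for example, shared-memory storage and the number of mainloop iterations), so read this as an outline of the concept rather than the exact API.
```c++
// Hedged sketch: invoke a threadblock-scoped Mma satisfying the concept above.
template <typename Mma>
__device__ void threadblock_gemm(
    typename Mma::IteratorA iter_A,     // tile iterator over operand A in global memory
    typename Mma::IteratorB iter_B,     // tile iterator over operand B in global memory
    typename Mma::FragmentC &accum) {   // accumulator tile held in registers
  Mma mma;                              // assumed default-constructible for this sketch
  accum.clear();                        // zero the accumulators
  mma(accum, iter_A, iter_B, accum);    // accumulate the matrix product of the tile range
}
```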
## Warp-level Matrix Multiply API
Warp-level GEMM operators load tiles from shared memory into registers and then compute matrix multiplies using either
Tensor Cores or CUDA Cores. The result is accumulated in a register tile. Iterators are defined for each
operand `A`, `B`, and `C`.
The warp-level GEMM API is a generalization of CUDA's WMMA API to achieve the following objectives:
- native matrix multiply sizes of Tensor Cores
- permuted shared memory layouts to ensure conflict-free accesses
- pointer initialization outside of the mainloop
- efficient traversal
Defining a warp-level matrix multiply in CUTLASS is similar to WMMA as shown below.
![ALT](/media/images/cutlass-warp-level-gemm-api-instantiation.png "CUTLASS vs WMMA API")
The usage model is also similar. The following example computes a warp-level GEMM operation,
accumulating a series of matrix products in a register-backed array. The input to a warp-level
GEMM operation in CUTLASS _must_ be data in shared memory accessed through tile iterators or data already held in register-backed fragments.
![ALT](/media/images/cutlass-warp-level-gemm-operation.png "CUTLASS warp-level GEMM API")
```c++
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
  cutlass::sizeof_bits<cutlass::half_t>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
  cutlass::sizeof_bits<cutlass::half_t>::value, 64>;
using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp<
cutlass::gemm::GemmShape<64, 64, 8>, // Overall warp-level GEMM operation
cutlass::gemm::GemmShape<16, 8, 8>, // Target instruction
cutlass::half_t, LayoutA, // operand A type and layout
cutlass::half_t, LayoutB, // operand B type and layout
float, // accumulator type
cutlass::layout::RowMajor>::Type; // accumulator layout
//
// Define a GEMM operation loading data from shared memory
//
int const kGemmK = 32;
__shared__ cutlass::half_t smem_buffer_A[WarpMma::Shape::kM * kGemmK];
__shared__ cutlass::half_t smem_buffer_B[WarpMma::Shape::kN * kGemmK];
//
// Construct iterators into SMEM tiles
//
// leading dimensions inferred from matrix problem size
int lda = WarpMma::Shape::kM;
int ldb = WarpMma::Shape::kN;
// iterators into shared memory
WarpMma::IteratorA warp_iterator_A({smem_buffer_A, lda});
WarpMma::IteratorB warp_iterator_B({smem_buffer_B, ldb});
// Fragments in registers storing the operands
WarpMma::FragmentA frag_A;
WarpMma::FragmentB frag_B;
WarpMma::FragmentC accum;
WarpMma mma;
accum.clear();
//
// Accumulated outer product
//
#pragma unroll 1
for (int k = 0; k < kGemmK; k += WarpMma::Shape::kK) {
  warp_iterator_A.load(frag_A);                   // Load fragments from A and B matrices
  warp_iterator_B.load(frag_B);
  ++warp_iterator_A; ++warp_iterator_B;           // Advance along GEMM K to next tile in A
// and B matrices
// Compute matrix product
mma(accum, frag_A, frag_B, accum);
}
```
*Concept.* Warp-level Mma operations are function objects satisfying the following concept.
```c++
struct Mma {
/// Shape of warp-level matrix operation (concept: GemmShape)
struct Shape;
/// Data type of multiplicand A (concept: numeric type)
struct ElementA;
/// Layout of multiplicand A (concept: Layout)
struct LayoutA;
/// Data type of multiplicand B (concept: numeric type)
struct ElementB;
/// Layout of multiplicand B (concept: Layout)
struct LayoutB;
/// Data type of accumulator matrix C (concept: numeric type)
struct ElementC;
/// Layout of accumulator matrix C (concept: Layout)
struct LayoutC;
/// Iterator of A operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept
struct IteratorA;
/// Fragment object loaded from IteratorA (concept: Array<ElementA, ..>)
struct FragmentA;
/// Iterator of B operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept
struct IteratorB;
/// Fragment object loaded from IteratorB (concept: Array<ElementB, ..>)
struct FragmentB;
/// Iterator of C operand in shared memory -
/// satisfies: ReadableRandomAccessTileIteratorConcept | WriteableRandomAccessTileIteratorConcept
struct IteratorC;
/// Fragment object loaded from IteratorC (concept: Array<ElementC, ..>)
struct FragmentC;
/// Indicates class of matrix operator (arch::OpClassSimt or arch::OpClassTensorOp)
struct OperatorClass;
//
// Methods
//
/// Computes a matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
FragmentC &D,
IteratorA A,
IteratorB B,
FragmentC const &C);
};
```
*Tensor Core Operators.* Warp-level matrix multiply operators targeting Tensor Cores
may be defined with the following template arguments. The `Policy` type specifies implementation-level details which may
be used to affect performance or internal implementation of the warp-level operator.
```c++
namespace cutlass {
namespace gemm {
namespace warp {
/// Structure to compute the matrix product targeting Tensor Core operations.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
  /// Policy describing the warp-level Tensor Core operation (concept: MmaTensorOpPolicy)
typename Policy_,
/// Used for partial specialization
typename Enable = bool
>
class MmaTensorOp;
} // namespace warp
} // namespace gemm
} // namespace cutlass
```
*SIMT Math Instructions.* Warp-level matrix multiply operators targeting CUDA Cores
may be defined with the following template arguments. The `Policy` type specifies implementation-level details which may
be used to affect performance or internal implementation of the warp-level operator.
```c++
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Used for partial specialization
typename Enable = bool
>
class MmaSimt;
```
## Thread-level GEMM API
Thread-level GEMM operations perform matrix multiply-accumulate on data held in registers. These target CUDA Cores exclusively.
*Concept.* Thread-level matrix multiply operations are function objects satisfying the following concept.
```c++
struct Mma {
  /// Shape of thread-level matrix operation (concept: GemmShape)
struct Shape;
/// Data type of multiplicand A (concept: numeric type)
struct ElementA;
/// Layout of multiplicand A (concept: Layout)
struct LayoutA;
  /// Fragment object holding operand A (concept: Array<ElementA, ..>)
struct FragmentA;
/// Data type of multiplicand B (concept: numeric type)
struct ElementB;
/// Layout of multiplicand B (concept: Layout)
struct LayoutB;
  /// Fragment object holding operand B (concept: Array<ElementB, ..>)
struct FragmentB;
/// Data type of accumulator matrix C (concept: numeric type)
struct ElementC;
/// Layout of accumulator matrix C (concept: Layout)
struct LayoutC;
  /// Fragment object holding the accumulator C (concept: Array<ElementC, ..>)
struct FragmentC;
//
// Methods
//
/// Computes a matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C);
};
```
The CUTLASS thread-level GEMM template accepts the following template arguments.
```c++
namespace cutlass {
namespace gemm {
namespace thread {
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Concept: arch::OpMultiplyAdd or arch::Mma<>
typename Operator = arch::OpMultiplyAdd,
/// Used for partial specialization
typename Enable = bool
>
struct Mma;
} // namespace thread
} // namespace gemm
} // namespace cutlass
```
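As an illustration, the following sketch instantiates this template with an assumed per-thread tile shape and single-precision data and invokes it on register-backed fragments; the shapes and layouts that are actually supported depend on the partial specializations available for the target architecture.
```c++
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/layout/matrix.h"
// Hedged sketch: shape and types are assumptions chosen for illustration.
using ThreadMma = cutlass::gemm::thread::Mma<
  cutlass::gemm::GemmShape<8, 8, 1>,      // per-thread tile: an 8-by-8 outer product per k step
  float, cutlass::layout::ColumnMajor,    // operand A
  float, cutlass::layout::RowMajor,       // operand B
  float, cutlass::layout::RowMajor,       // accumulator C
  cutlass::arch::OpMultiplyAdd>;
__device__ void thread_level_gemm(
  ThreadMma::FragmentC &D,
  ThreadMma::FragmentA const &A,
  ThreadMma::FragmentB const &B,
  ThreadMma::FragmentC const &C) {
  ThreadMma mma;
  mma(D, A, B, C);                        // D = A * B + C on register-backed fragments
}
```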
## Efficient Epilogue
CUTLASS GEMM operators perform the matrix multiply-accumulate followed by an epilogue operation,
similar to cuBLAS. CUTLASS implements an efficient row-major epilogue; thus, to achieve a
column-major GEMM, operands A and B are transposed and swapped.
To enable efficient row-major epilogue for both row-major and column-major output layout,
CUTLASS' device-level GEMM operators `cutlass::gemm::device::Gemm` and `cutlass::gemm::device::GemmUniversal`
provide two template definitions:
- (a) [General definition](/include/cutlass/gemm/device/gemm.h#L217)
- (b) [Specialized definition for column-major source/output](/include/cutlass/gemm/device/gemm.h#L545)
The efficient row-major epilogue is used as follows:
- (i) A GEMM operator with row-major source/output uses template (a). It runs a row-major GEMM
followed by the efficient row-major epilogue.
- (ii) A GEMM operator with column-major source/output uses template (b). It transposes and swaps
operands A and B to enable the efficient epilogue: `A x B = C => Transpose(B) x Transpose(A) = Transpose(C)`.
For a column-major source matrix C, Transpose(C) is row-major, so the efficient row-major epilogue applies.
Note that cuBLAS typically expects a column-major source matrix (C) and output matrix (D). Thus,
the CUTLASS library only instantiates and generates GEMM operators with column-major output layout. However,
CUTLASS itself can run both row-major and column-major output layouts for all combinations
of input layouts (as sketched below). Thus, CUTLASS supports the following layout combinations for input and output layouts:
- `{N,T} x {N,T} => {N,T}` - NN, NT, TN, TT GEMM for both row-major and column-major output
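The sketch below (parameter choices are assumptions, mirroring the earlier device-level example) shows the same device-level template instantiated with a row-major and with a column-major output layout; the column-major case is the one that exercises the operand transpose-and-swap described above.
```c++
#include "cutlass/gemm/device/gemm.h"
// Hedged sketch: both instantiations use the efficient row-major epilogue internally.
using GemmRowMajorOutput = cutlass::gemm::device::Gemm<
  cutlass::half_t, cutlass::layout::RowMajor,     // A
  cutlass::half_t, cutlass::layout::ColumnMajor,  // B
  cutlass::half_t, cutlass::layout::RowMajor,     // C/D - row-major output, template (a)
  float>;                                         // accumulator
using GemmColumnMajorOutput = cutlass::gemm::device::Gemm<
  cutlass::half_t, cutlass::layout::RowMajor,     // A
  cutlass::half_t, cutlass::layout::ColumnMajor,  // B
  cutlass::half_t, cutlass::layout::ColumnMajor,  // C/D - column-major output, template (b)
  float>;                                         // accumulator
```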
## Instruction-level operations
CUTLASS defines a template-based interface to Tensor Core operations to avoid resorting
to inline PTX.
- [mma_sm70.h](/include/cutlass/arch/mma_sm70.h) - Volta TensorCore operations
- [mma_sm75.h](/include/cutlass/arch/mma_sm75.h) - Turing TensorCore operations
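As a hedged example, this interface can be instantiated directly for a single Tensor Core instruction; the shape, data types, and layouts below are assumptions chosen to match a Turing-era `mma.sync` operation, and the fragment types come from the corresponding specialization.
```c++
#include "cutlass/arch/mma_sm75.h"
// Hedged sketch: one warp-wide Tensor Core instruction (assumed 16x8x8 shape, F16 inputs, F32 accumulation).
using InstructionMma = cutlass::arch::Mma<
  cutlass::gemm::GemmShape<16, 8, 8>,             // native instruction shape
  32,                                             // participating threads (one warp)
  cutlass::half_t, cutlass::layout::RowMajor,     // A
  cutlass::half_t, cutlass::layout::ColumnMajor,  // B
  float, cutlass::layout::RowMajor,               // C
  cutlass::arch::OpMultiplyAdd>;
__device__ void single_mma(
  InstructionMma::FragmentC &d,
  InstructionMma::FragmentA const &a,
  InstructionMma::FragmentB const &b,
  InstructionMma::FragmentC const &c) {
  InstructionMma mma;
  mma(d, a, b, c);                                // issues one mma.sync instruction across the warp
}
```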
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/gemm_api.md/0 | {
"file_path": "media/docs/gemm_api.md",
"repo_id": "media",
"token_count": 7618
} | 50 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Python AST frontend that parses input into DAG IR
"""
import ast
import inspect
import textwrap
from cutlass_library import DataType
import cutlass
from cutlass.backend.evt.frontend.frontend_base import EVTFrontendBase
from cutlass.backend.epilogue import relu
from cutlass.backend.library import FunctionalOp
class PythonASTFrontend(EVTFrontendBase, ast.NodeVisitor):
def __init__(self, element_compute=DataType.f32, **kwargs):
super().__init__(element_compute, **kwargs)
# Flags
# If this state is True, visit_Constant returns values without creating imm node
self.no_imm = False
self.visiting_return = False
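    # Parse the Python source of the subclass's __call__ method into an AST and walk it to
    # build the DAG IR; example_inputs supplies the example tensors bound to load and store nodes.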
def parse(self, example_inputs):
self.example_inputs = example_inputs
self.source = textwrap.dedent(inspect.getsource(self.__call__))
self.ast = ast.parse(self.source)
self.visit(self.ast)
#
# Helper functions
#
@staticmethod
def ast_op_to_bindings(op):
mapping = {
ast.Add: FunctionalOp.Plus,
ast.Sub: FunctionalOp.Minus,
ast.Mult: FunctionalOp.Multiplies,
ast.Div: FunctionalOp.Divides,
"relu": relu.binding_type,
"multiply_add": FunctionalOp.MultiplyAdd,
"sum": (FunctionalOp.Plus, FunctionalOp.AtomicAdd),
"max": (FunctionalOp.Maximum, FunctionalOp.AtomicMaximum)
}
return mapping[op]
#
# Visiting different node types
#
def visit_FunctionDef(self, node: ast.FunctionDef):
# Visit args and register load nodes
for arg in node.args.args:
self.visit(arg)
for expr in node.body:
self.visit(expr)
def visit_arg(self, node: ast.arg):
# Name of the argument
name = node.arg
try:
example_tensor = self.example_inputs[name]
        except KeyError:
raise RuntimeError(f"Example input for {name} is not provided.")
self.add_load_node(name, example_tensor)
def visit_Name(self, node: ast.Name):
return node.id
def visit_Constant(self, node: ast.Constant):
if self.no_imm:
return node.value
else:
name = self.add_imm(node.value)
return name
def visit_Tuple(self, node: ast.Tuple):
results = []
for elt in node.elts:
results.append(self.visit(elt))
return tuple(results)
def visit_keyword(self, node: ast.keyword):
return {node.arg: self.visit(node.value)}
def visit_BinOp(self, node: ast.BinOp):
if self.visiting_return:
raise SyntaxError("Return value cannot be an expression")
lhs = self.visit(node.left)
rhs = self.visit(node.right)
op = self.ast_op_to_bindings(type(node.op))
name = self.add_compute_node(op)
# Add edges
# The edge weights are used to sort the input args
self.add_edge(lhs, name, weight=0)
self.add_edge(rhs, name, weight=1)
return name
    def visit_Assign(self, node: ast.Assign):
target = self.visit(node.targets[0])
value = self.visit(node.value)
# Create the assign node
self.add_store_node(target)
# Add edges
self.add_edge(value, target)
return target
def visit_Call(self, node: ast.Call):
if self.visiting_return:
raise SyntaxError("Return value cannot be an expression")
func = self.visit(node.func)
args = [self.visit(arg) for arg in node.args]
if func in self.layout_fns.keys():
# Parse kwargs
# By default, visiting imm automatically creates a load node
# However, in function call, keyword args are used to set
# specific function attributes such as indices for permute
# So no_imm is set to True temporarily
self.no_imm = True
kwargs = {}
for kw in node.keywords:
kwargs.update(self.visit(kw))
self.no_imm = False
op = self.layout_fns[func]
name = self.add_layout_node(op, kwargs)
else:
op = self.ast_op_to_bindings(func)
name = self.add_compute_node(op)
# Add edges
for idx, arg in enumerate(args):
self.add_edge(arg, name, weight=idx)
return name
def visit_Return(self, node: ast.Return):
self.visiting_return = True
results = self.visit(node.value)
self.visiting_return = False
self.return_names = results
if not isinstance(results, tuple):
results = (results,)
for rst in results:
try:
example_tensor = self.example_inputs[rst]
            except KeyError:
raise RuntimeError(f"Example input for {rst} is not provided.")
self.set_store_tensor(rst, example_tensor)
self.mark_output(rst)
| python/cutlass/backend/evt/frontend/python_ast.py/0 | {
"file_path": "python/cutlass/backend/evt/frontend/python_ast.py",
"repo_id": "python",
"token_count": 2728
} | 51 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Eliminate layout manipulation nodes
"""
from copy import deepcopy
from cutlass.backend.evt.ir import DAGIR, LayoutNode
from cutlass.backend.evt.passes.pass_manager import EVTPassBase
from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation
class PassLayoutManipulateElimination(EVTPassBase):
"""
Eliminate layout manipulation nodes
"""
dependencies = [PassShapeTypePropagation]
def __init__(self, dag_ir: DAGIR) -> None:
super().__init__(dag_ir)
self.copy_cnt = 0
def call(self):
self.layout_nodes_worklist = self.get_all_layout_nodes()
        # Run the while loop until all layout nodes are eliminated
while(len(self.layout_nodes_worklist) > 0):
node = self.layout_nodes_worklist.pop(0)
# for node in layout_nodes:
# Step 1: get the propagation direction
direction = self.get_propagation_direction(node)
self.visited = []
getattr(self, f"propagate_to_{direction}")(self.dag_ir.get_node_meta(node), node)
# Eliminate the current node
input_node = self.dag_ir.get_all_inputs(node)[0]
self.dag_ir.replace_all_uses_with(node, input_node)
# layout_nodes = self.get_all_layout_nodes()
def get_all_layout_nodes(self):
layout_nodes = []
for node_meta in reversed(self.dag_ir.node_metas_topological_order()):
if isinstance(node_meta, LayoutNode):
layout_nodes.append(node_meta.name)
return layout_nodes
def get_propagation_direction(self, node: str):
"""
The logic is propagating all layout nodes away from the accumulator node.
"""
self.visited = []
self.get_influenced_users(node)
nodes_influenced_dir_users = self.visited
self.visited = []
self.get_influenced_inputs(node)
nodes_influenced_dir_inputs = self.visited
if "accum" in nodes_influenced_dir_users and "accum" not in nodes_influenced_dir_inputs:
return "inputs"
elif "accum" not in nodes_influenced_dir_users and "accum" in nodes_influenced_dir_inputs:
return "users"
else:
raise RuntimeError("Unsolved propagation direction")
# Get all influenced nodes if we propagate along the user direction
def get_influenced_users(self, node: str):
if node in self.visited:
return
self.visited.append(node)
users = self.dag_ir.get_users(node)
for user in users:
self.get_influenced_users(user)
user_inputs = []
for user in users:
user_inputs.append(set(self.dag_ir.get_all_inputs(user)))
if len(user_inputs) > 0:
user_inputs = set.union(*user_inputs)
user_inputs.remove(node)
for input in user_inputs:
self.get_influenced_inputs(input)
# Get all influenced nodes if we propagate along the input direction
def get_influenced_inputs(self, node: str):
if node in self.visited:
return
self.visited.append(node)
inputs = self.dag_ir.get_all_inputs(node)
for input in inputs:
self.get_influenced_inputs(input)
input_users = []
for input in inputs:
input_users.append(set(self.dag_ir.get_users(input)))
if len(input_users) > 0:
input_users = set.union(*input_users)
input_users.remove(node)
for user in input_users:
self.get_influenced_users(user)
def add_copy_before(self, layout_node_meta: LayoutNode, target: str):
copied_node_meta = deepcopy(layout_node_meta)
copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}"
self.copy_cnt += 1
copied_node_meta.name = copied_node
self.dag_ir.add_node(copied_node_meta)
# Add edges
target_inputs = self.dag_ir.get_all_inputs(target)
for src in target_inputs:
self.dag_ir.remove_edge(src, target)
self.dag_ir.add_edge(src, copied_node)
self.dag_ir.add_edge(copied_node, target)
self.layout_nodes_worklist.append(copied_node)
def add_copy_after(self, layout_node_meta: LayoutNode, target: str):
copied_node_meta = deepcopy(layout_node_meta)
copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}"
self.copy_cnt += 1
copied_node_meta.name = copied_node
self.dag_ir.add_node(copied_node_meta)
# Add edges
users = self.dag_ir.get_users(target)
for user in users:
self.dag_ir.remove_edge(target, user)
self.dag_ir.add_edge(copied_node, user)
self.dag_ir.add_edge(target, copied_node)
self.layout_nodes_worklist.append(copied_node)
# Propagate the layout `node` along the user direction
def propagate_to_users(self, layout_node_meta: LayoutNode, node: str):
"""
Propagate layout node to users
"""
if node in self.visited:
# Avoid applying twice
return
self.visited.append(node)
node_meta = self.dag_ir.get_node_meta(node)
if layout_node_meta.name != node:
if isinstance(node_meta, LayoutNode):
# Layout node is not transparent with layout node
self.add_copy_before(layout_node_meta, node)
return
else:
layout_node_meta.apply_to_user(node_meta)
users = self.dag_ir.get_users(node)
user_inputs = []
for user in users:
user_inputs.append(set(self.dag_ir.get_all_inputs(user)))
for user in users:
self.propagate_to_users(layout_node_meta, user)
if len(user_inputs) > 0:
user_inputs = set.union(*user_inputs)
user_inputs.remove(node)
for input in user_inputs:
self.propagate_to_inputs(layout_node_meta.get_inverse_node(), input)
# Propagate the layout `node` along the input direction
def propagate_to_inputs(self, layout_node_meta: LayoutNode, node: str):
"""
Propagate layout node to inputs
"""
if node in self.visited:
# Avoid applying twice
return
self.visited.append(node)
node_meta = self.dag_ir.get_node_meta(node)
if layout_node_meta.name != node:
if isinstance(node_meta, LayoutNode):
# Layout node is not transparent with layout node
self.add_copy_after(layout_node_meta, node)
return
else:
layout_node_meta.apply_to_input(node_meta)
inputs = self.dag_ir.get_all_inputs(node)
input_users = []
for input in inputs:
input_users.append(set(self.dag_ir.get_users(input)))
for input in inputs:
self.propagate_to_inputs(layout_node_meta, input)
if len(input_users) > 0:
input_users = set.union(*input_users)
input_users.remove(node)
for user in input_users:
self.propagate_to_users(layout_node_meta.get_inverse_node(), user)
| python/cutlass/backend/evt/passes/pass_layout_elimination.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/pass_layout_elimination.py",
"repo_id": "python",
"token_count": 3853
} | 52 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for converting between frontend datatypes and CUTLASS datatypes
"""
import cutlass
from cutlass_library import (
DataTypeSize,
MathOperation,
MathInstruction
)
from cutlass.backend.library import (
TileDescription,
)
bfloat16_available = None
cupy_available = None
numpy_available = None
torch_available = None
_library_to_cupy_dict = None
_library_to_numpy_dict = None
_library_to_torch_dict = None
_torch_to_library_dict = None
def is_numpy_available():
global numpy_available, _library_to_numpy_dict
if numpy_available is None:
try:
import numpy as np
numpy_available = True
_library_to_numpy_dict = {
cutlass.DataType.f16: np.float16,
cutlass.DataType.f32: np.float32,
cutlass.DataType.f64: np.float64,
cutlass.DataType.s8: np.int8,
cutlass.DataType.s32: np.int32,
}
except ImportError:
numpy_available = False
_library_to_numpy_dict = {}
return numpy_available
def is_numpy_tensor(inp) -> bool:
if is_numpy_available():
import numpy as np
return isinstance(inp, np.ndarray)
return False
def numpy_library_type(inp) -> cutlass.DataType:
if is_numpy_available():
import numpy as np
if inp == np.float16:
return cutlass.DataType.f16
elif inp == np.float32:
return cutlass.DataType.f32
elif inp == np.float64:
return cutlass.DataType.f64
elif inp == np.int8:
return cutlass.DataType.s8
elif inp == np.int32:
return cutlass.DataType.s32
return None
def numpy_type(inp):
return _library_to_numpy_dict.get(inp, None)
def is_cupy_available():
    global cupy_available, _library_to_cupy_dict
if cupy_available is None:
try:
import cupy as cp
cupy_available = True
_library_to_cupy_dict = {
cutlass.DataType.f16: cp.float16,
cutlass.DataType.f32: cp.float32,
cutlass.DataType.f64: cp.float64,
cutlass.DataType.s8: cp.int8,
cutlass.DataType.s32: cp.int32,
}
except ImportError:
cupy_available = False
_library_to_cupy_dict = {}
return cupy_available
def is_cupy_tensor(inp) -> bool:
if is_cupy_available():
import cupy as cp
return isinstance(inp, cp.ndarray)
return False
def cupy_library_type(inp) -> cutlass.DataType:
if is_cupy_available():
import cupy as cp
if inp == cp.float16:
return cutlass.DataType.f16
elif inp == cp.float32:
return cutlass.DataType.f32
elif inp == cp.float64:
return cutlass.DataType.f64
return None
def cupy_type(inp):
return _library_to_cupy_dict.get(inp, None)
def is_torch_available():
global torch_available, _library_to_torch_dict, _torch_to_library_dict
if torch_available is None:
try:
import torch
torch_available = True
_torch_to_library_dict = {
torch.half: cutlass.DataType.f16,
torch.float16: cutlass.DataType.f16,
torch.bfloat16: cutlass.DataType.bf16,
torch.float: cutlass.DataType.f32,
torch.float32: cutlass.DataType.f32,
torch.double: cutlass.DataType.f64,
torch.float64: cutlass.DataType.f64,
torch.int8: cutlass.DataType.s8,
torch.int32: cutlass.DataType.s32,
torch.uint8: cutlass.DataType.u8,
}
            # torch.half/torch.float/torch.double are aliases of float16/float32/float64,
            # so a single entry per CUTLASS type suffices here.
            _library_to_torch_dict = {
                cutlass.DataType.f16: torch.float16,
                cutlass.DataType.bf16: torch.bfloat16,
                cutlass.DataType.f32: torch.float32,
                cutlass.DataType.f64: torch.float64,
                cutlass.DataType.s8: torch.int8,
                cutlass.DataType.s32: torch.int32,
                cutlass.DataType.u8: torch.uint8,
            }
def possibly_add_type(torch_type_name, cutlass_type):
# Only try adding the type if the version of torch being used supports it
if hasattr(torch, torch_type_name):
torch_type = getattr(torch, torch_type_name)
_torch_to_library_dict[torch_type] = cutlass_type
_library_to_torch_dict[cutlass_type] = torch_type
possibly_add_type("float8_e4m3fn", cutlass.DataType.e4m3)
possibly_add_type("float8_e5m2", cutlass.DataType.e5m2)
except ImportError:
torch_available = False
_torch_to_library_dict = {}
_library_to_torch_dict = {}
return torch_available
def is_torch_tensor(inp) -> bool:
if is_torch_available():
import torch
return isinstance(inp, torch.Tensor)
return False
def torch_library_type(inp) -> cutlass.DataType:
return _torch_to_library_dict.get(inp, None)
def torch_type(inp):
return _library_to_torch_dict.get(inp, None)
def is_bfloat16_available():
global bfloat16_available
if bfloat16_available is None:
try:
import bfloat16
bfloat16_available = True
except ImportError:
bfloat16_available = False
return bfloat16_available
def bfloat16_library_type(inp) -> cutlass.DataType:
if is_bfloat16_available():
import bfloat16
if inp == bfloat16.bfloat16:
return cutlass.DataType.bf16
def bfloat16_type(inp):
if is_bfloat16_available():
import bfloat16
if inp == cutlass.DataType.bf16:
return bfloat16.bfloat16
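# Map a value that is either already a CUTLASS library DataType or a frontend dtype
# (numpy, cupy, torch, or bfloat16 type) to the corresponding CUTLASS library DataType.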
def library_type(inp):
if inp in DataTypeSize:
return inp
for cvt_fn in [
bfloat16_library_type,
cupy_library_type,
numpy_library_type,
torch_library_type,
]:
out = cvt_fn(inp)
if out is not None:
return out
raise Exception(f"No available conversion from type {inp} to a library type.")
def _tensor_from_numpy(np_tensor):
dtype = library_type(np_tensor.dtype)
if np_tensor.flags.c_contiguous:
layout = cutlass.LayoutType.RowMajor
elif np_tensor.flags.f_contiguous:
layout = cutlass.LayoutType.ColumnMajor
return (dtype, layout)
def _tensor_from_torch(pt_tensor):
dtype = library_type(pt_tensor.dtype)
return (dtype, cutlass.LayoutType.RowMajor)
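# Infer the (cutlass.DataType, cutlass.LayoutType) pair describing a frontend tensor or Python scalar.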
def get_datatype_and_layout(tensor):
if (is_numpy_tensor(tensor) or is_cupy_tensor(tensor)):
return _tensor_from_numpy(tensor)
elif is_torch_tensor(tensor):
return _tensor_from_torch(tensor)
elif isinstance(tensor, float) or isinstance(tensor, int):
return (cutlass.DataType.f32, cutlass.LayoutType.RowMajor)
else:
raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.")
def get_tensor_shape(tensor, op="GEMM"):
if (is_numpy_tensor(tensor) or is_cupy_tensor(tensor)):
return tensor.shape
elif is_torch_tensor(tensor):
size = tensor.size()
if op == "CONV":
# PyTorch Tensors have shape NCHW
return (size[0], size[2], size[3], size[1])
else:
return tuple(tensor.size())
elif isinstance(tensor, float) or isinstance(tensor, int):
return (1,)
else:
raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.")
_math_operation_value_map = {x.value: x for x in MathOperation}
def backend_math_operation(math_op: MathOperation):
if math_op.value not in _math_operation_value_map.keys():
raise Exception(f"Unable to convert math operation of type {math_op} to backend math operation.")
return _math_operation_value_map[math_op.value]
def construct_backend_td(td: cutlass.TileDescription,
kernel_schedule: cutlass.KernelScheduleType,
epilogue_schedule: cutlass.EpilogueScheduleType,
tile_scheduler: cutlass.TileSchedulerType) -> TileDescription:
mi = td.math_instruction
backend_mi = MathInstruction(
mi.instruction_shape,
mi.element_a,
mi.element_b,
mi.element_accumulator,
mi.opcode_class,
backend_math_operation(mi.math_operation)
)
cluster_shape = td.cluster_shape if hasattr(td, "cluster_shape") else [1, 1, 1]
return TileDescription(td.threadblock_shape, td.stages, td.warp_count,
backend_mi, cluster_shape, kernel_schedule, epilogue_schedule, tile_scheduler)
def td_from_profiler_op(op) -> TileDescription:
"""
Converts the profiler's TileDescription in ``op`` into the backend TileDescription
:param op: profiler Operation
:returns: backend TileDescription
:rtype: cutlass.backend.TileDescription
"""
kschedule = op.kernel_schedule if hasattr(op, 'kernel_schedule') else None
eschedule = op.epilogue_schedule if hasattr(op, 'epilogue_schedule') else None
tschedule = op.tile_scheduler if hasattr(op, 'tile_scheduler') else None
return construct_backend_td(op.tile_description, kschedule, eschedule, tschedule)
def td_from_profiler_td(td: TileDescription) -> TileDescription:
"""
Converts the profiler's TileDescription into the backend TileDescription
:param td: profiler TileDescription
:type td: cutlass.TileDescription
:returns: backend TileDescription
:rtype: cutlass.backend.TileDescription
"""
return construct_backend_td(td, kernel_schedule=None, epilogue_schedule=None, tile_scheduler=None)
def to_camel_case(snake_str):
return "".join(x.capitalize() for x in snake_str.lower().split("_"))
def getattr_enum(obj, attr_name):
# The attr_name is under the snake_case
camel_attr = to_camel_case(attr_name)
if hasattr(obj, camel_attr):
return getattr(obj, camel_attr)
else:
raise Exception(f"Invalid option: {attr_name}")
| python/cutlass/utils/datatypes.py/0 | {
"file_path": "python/cutlass/utils/datatypes.py",
"repo_id": "python",
"token_count": 5250
} | 53 |
/* Some sane resets. */
html {
height: 100%;
}
body {
margin: 0;
min-height: 100%;
}
/* All the flexbox magic! */
body,
.sb-announcement,
.sb-content,
.sb-main,
.sb-container,
.sb-container__inner,
.sb-article-container,
.sb-footer-content,
.sb-header,
.sb-header-secondary,
.sb-footer {
display: flex;
}
/* These order things vertically */
body,
.sb-main,
.sb-article-container {
flex-direction: column;
}
/* Put elements in the center */
.sb-header,
.sb-header-secondary,
.sb-container,
.sb-content,
.sb-footer,
.sb-footer-content {
justify-content: center;
}
/* Put elements at the ends */
.sb-article-container {
justify-content: space-between;
}
/* These elements grow. */
.sb-main,
.sb-content,
.sb-container,
article {
flex-grow: 1;
}
/* Because padding making this wider is not fun */
article {
box-sizing: border-box;
}
/* The announcements element should never be wider than the page. */
.sb-announcement {
max-width: 100%;
}
.sb-sidebar-primary,
.sb-sidebar-secondary {
flex-shrink: 0;
width: 17rem;
}
.sb-announcement__inner {
justify-content: center;
box-sizing: border-box;
height: 3rem;
overflow-x: auto;
white-space: nowrap;
}
/* Sidebars, with checkbox-based toggle */
.sb-sidebar-primary,
.sb-sidebar-secondary {
position: fixed;
height: 100%;
top: 0;
}
.sb-sidebar-primary {
left: -17rem;
transition: left 250ms ease-in-out;
}
.sb-sidebar-secondary {
right: -17rem;
transition: right 250ms ease-in-out;
}
.sb-sidebar-toggle {
display: none;
}
.sb-sidebar-overlay {
position: fixed;
top: 0;
width: 0;
height: 0;
transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease;
opacity: 0;
background-color: rgba(0, 0, 0, 0.54);
}
#sb-sidebar-toggle--primary:checked
~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"],
#sb-sidebar-toggle--secondary:checked
~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] {
width: 100%;
height: 100%;
opacity: 1;
transition: width 0ms ease, height 0ms ease, opacity 250ms ease;
}
#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary {
left: 0;
}
#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary {
right: 0;
}
/* Full-width mode */
.drop-secondary-sidebar-for-full-width-content
.hide-when-secondary-sidebar-shown {
display: none !important;
}
.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary {
display: none !important;
}
/* Mobile views */
.sb-page-width {
width: 100%;
}
.sb-article-container,
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 100vw;
}
.sb-article,
.match-content-width {
padding: 0 1rem;
box-sizing: border-box;
}
@media (min-width: 32rem) {
.sb-article,
.match-content-width {
padding: 0 2rem;
}
}
/* Tablet views */
@media (min-width: 42rem) {
.sb-article-container {
width: auto;
}
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 42rem;
}
.sb-article,
.match-content-width {
width: 42rem;
}
}
@media (min-width: 46rem) {
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 46rem;
}
.sb-article,
.match-content-width {
width: 46rem;
}
}
@media (min-width: 50rem) {
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 50rem;
}
.sb-article,
.match-content-width {
width: 50rem;
}
}
/* Tablet views */
@media (min-width: 59rem) {
.sb-sidebar-secondary {
position: static;
}
.hide-when-secondary-sidebar-shown {
display: none !important;
}
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 59rem;
}
.sb-article,
.match-content-width {
width: 42rem;
}
}
@media (min-width: 63rem) {
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 63rem;
}
.sb-article,
.match-content-width {
width: 46rem;
}
}
@media (min-width: 67rem) {
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 67rem;
}
.sb-article,
.match-content-width {
width: 50rem;
}
}
/* Desktop views */
@media (min-width: 76rem) {
.sb-sidebar-primary {
position: static;
}
.hide-when-primary-sidebar-shown {
display: none !important;
}
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 59rem;
}
.sb-article,
.match-content-width {
width: 42rem;
}
}
/* Full desktop views */
@media (min-width: 80rem) {
.sb-article,
.match-content-width {
width: 46rem;
}
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 63rem;
}
}
@media (min-width: 84rem) {
.sb-article,
.match-content-width {
width: 50rem;
}
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 67rem;
}
}
@media (min-width: 88rem) {
.sb-footer-content__inner,
.drop-secondary-sidebar-for-full-width-content .sb-article,
.drop-secondary-sidebar-for-full-width-content .match-content-width {
width: 67rem;
}
.sb-page-width {
width: 88rem;
}
}
| python/docs/_static/skeleton.css/0 | {
"file_path": "python/docs/_static/skeleton.css",
"repo_id": "python",
"token_count": 2359
} | 54 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Functions for manipulating IntTuples
"""
from functools import reduce
from itertools import chain
from typing import Union
from .typing import Integer
def is_int(x):
return isinstance(x, Integer)
def is_tuple(x):
return isinstance(x, tuple)
def flatten(t):
if is_tuple(t):
if len(t) == 0:
return ()
else:
return tuple(i for a in t for i in flatten(a))
else:
return (t,)
def signum(a):
return bool(a > 0) - bool(a < 0)
def product(a):
if is_tuple(a):
return reduce(lambda val,elem : val*product(elem), a, 1)
else:
return a
def inner_product(a, b):
if is_tuple(a): # tuple tuple
assert len(a) == len(b)
return sum(inner_product(x,y) for x,y in zip(a,b))
else: # "int" "int"
assert not is_tuple(b)
return a * b
def tuple_max(a):
if is_tuple(a):
return max(tuple_max(x) for x in a)
else:
return a
def elem_scale(a, b):
if is_tuple(a):
if is_tuple(b): # tuple tuple
assert len(a) == len(b)
return tuple(elem_scale(x,y) for x,y in zip(a,b))
else: # tuple "int"
assert False # Error
else:
if is_tuple(b): # "int" tuple
return elem_scale(a, product(b))
else: # "int" "int"
return a * b
# Inclusive prefix ceil div with output congruent to input a
def shape_div(a, b):
if is_tuple(a):
if is_tuple(b): # tuple tuple
assert len(a) == len(b)
return tuple(shape_div(x,y) for x,y in zip(a,b))
else: # tuple "int"
#r = [shape_div(a[0],b)] + [shape_div(a[i],b := shape_div(b, product(a[i-1]))) for i in range(1,len(a))]
r = []
for v in a:
r.append(shape_div(v,b))
b = shape_div(b,product(v))
return tuple(r)
else:
if is_tuple(b): # "int" tuple
return shape_div(a, product(b))
else: # "int" "int"
assert a % b == 0 or b % a == 0
#return -(-a // b) # Python exclusive impl: "//" is always floor div
if a % b == 0:
return a // b
else:
return signum(a*b)
# Exclusive prefix product with output congruent to input a
def prefix_product(a, init=1):
if is_tuple(a):
if is_tuple(init): # tuple tuple
assert len(a) == len(init)
return tuple(prefix_product(x,i) for x,i in zip(a,init))
else: # tuple "int"
#r = [prefix_product(a[0],init)] + [prefix_product(a[i],init := init * product(a[i-1])) for i in range(1,len(a))]
r = []
for v in a:
r.append(prefix_product(v,init))
init = init * product(v)
return tuple(r)
else:
if is_tuple(init): # "int" tuple
assert False # Error
else: # "int" "int"
return init
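# Convert a linear index into a coordinate congruent to `shape`, using `stride`
# (by default, the exclusive prefix product of `shape`).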
def idx2crd(idx, shape, stride=None):
if stride is None:
stride = prefix_product(shape)
if is_tuple(idx):
if is_tuple(shape): # tuple tuple tuple
assert len(idx) == len(shape) and len(idx) == len(stride)
return tuple(idx2crd(i, s, d) for i, s, d in zip(idx,shape,stride))
else: # tuple "int" "int"
assert False # Error
else:
if is_tuple(shape): # "int" tuple tuple
assert len(shape) == len(stride)
return tuple(idx2crd(idx, s, d) for s,d in zip(shape,stride))
else: # "int" "int" "int"
return (idx // stride) % shape
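# Convert a (possibly nested) coordinate into a linear index by taking its inner product
# with `stride` (by default, the exclusive prefix product of `shape`).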
def crd2idx(crd, shape, stride=None):
if stride is None:
stride = prefix_product(shape)
if is_tuple(crd):
if is_tuple(shape): # tuple tuple tuple
assert len(crd) == len(shape) and len(crd) == len(stride)
return sum(crd2idx(c, s, d) for c, s, d in zip(crd, shape, stride))
else: # tuple "int" "int"
assert False, f"crd={crd}, shape={shape}" # Error
else:
if crd is None:
crd = 0
if is_tuple(shape): # "int" tuple tuple
assert len(shape) == len(stride)
result = 0
for i in range(len(shape)-1):
result += crd2idx(crd % product(shape[i]), shape[i], stride[i])
crd = crd // product(shape[i])
return result + crd2idx(crd, shape[-1], stride[-1])
else: # "int" "int" "int"
return crd * stride
# Transform crd into the dst_shape's iteration space
def crd2crd(crd, dst_shape, src_shape=None):
if is_tuple(crd):
if is_tuple(dst_shape): # tuple tuple
assert len(crd) == len(dst_shape)
return tuple(crd2crd(x, y) for x, y in zip(crd,dst_shape))
else: # tuple "int"
# Ambiguous unless we have src_shape
assert src_shape is not None
return crd2idx(crd, src_shape)
else:
if is_tuple(dst_shape): # "int" tuple
return idx2crd(crd, dst_shape)
else: # "int" "int"
assert crd < dst_shape
return crd
# Filter trg according to crd: keep only elements of trg that are paired with None
def slice_(crd: Union[None, tuple, int],
trg: Union[tuple, int]):
if is_tuple(crd):
if is_tuple(trg): # tuple tuple
assert len(crd) == len(trg)
# match C++ behavior of `filter_tuple` using `tuple_cat(...)`
return tuple(chain(*filter(lambda x: x != (), [slice_(c, s) for c, s in zip(crd, trg)])))
else:
assert False # tuple "int" : Error
elif crd is None:
# match C++ behavior `return cute::tuple<B>{b};`
return (trg,)
else:
return ()
# Determine if None appears at any of an int_tuples' terminals
def has_none(a: Union[None, tuple, int]):
if is_tuple(a):
return any(has_none(v) for v in a)
else:
return a is None
| python/pycute/int_tuple.py/0 | {
"file_path": "python/pycute/int_tuple.py",
"repo_id": "python",
"token_count": 3526
} | 55 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit tests for EVT load nodes on SM80 and SM90
"""
import logging
import unittest
import numpy as np
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTLoad(EVTTestCaseBase):
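    # The tests below all drive the same elementwise EVT through EVTTestBed; what
    # changes is the shape of each entry in example_inputs relative to (l, m, n),
    # which selects the load pattern under test: a full [m, n] aux tensor, a row
    # or column broadcast, or a scalar, each paired with a batched variant.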
def test_tensor_load(self):
"""
Load extra tensor with shape [m, n]
"""
def evt_tensor_load(accum, C, aux, aux_batch):
D = accum + C + aux + aux_batch
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"aux": self.fake_tensor(self.element, (m, n)),
"aux_batch": self.fake_tensor(np.float32, (l, m, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_tensor_load, example_inputs)
input_keys = ["C", "aux", "aux_batch"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_row_broadcast(self):
"""
Load extra tensor with shape [1, n]
"""
def evt_row_broadcast(accum, C, bias, bias_batch):
D = accum + C + bias + bias_batch
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"bias": self.fake_tensor(self.element, (n,)),
"bias_batch": self.fake_tensor(np.float32, (l, 1, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_row_broadcast, example_inputs)
input_keys = ["C", "bias", "bias_batch"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_column_broadcast(self):
"""
Load extra tensor with shape [m, 1]
"""
def evt_column_broadcast(accum, C, bias, bias_batch):
D = accum + C + bias + bias_batch
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"bias": self.fake_tensor(self.element, (m, 1)),
"bias_batch": self.fake_tensor(np.float32, (l, m, 1)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_column_broadcast, example_inputs)
input_keys = ["C", "bias", "bias_batch"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_scalar_broadcast(self):
"""
Load extra tensor with shape [1, 1]
"""
def evt_scalar_broadcast(accum, C, alpha, alpha_batch):
D = accum + C + alpha + alpha_batch
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"alpha": 0.5,
"alpha_batch": self.fake_tensor(np.float32, (l, 1, 1)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_scalar_broadcast, example_inputs)
input_keys = ["C", "alpha", "alpha_batch"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_load_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_load_sm80_90.py",
"repo_id": "test",
"token_count": 2528
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/tensor.hpp>
TEST(CuTe_core, Tuple)
{
using namespace cute;
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("SIMPLE STATIC AND DYNAMIC TUPLES");
CUTLASS_TRACE_HOST("-------------------------------");
using tuple_2d_s_type = tuple<_8, _4>; // (8,4)
using tuple_3d_s_type = tuple<_8, _4, _2>; // (8,4,2)
using tuple_3h_s_type = tuple<tuple<_1, _2>, _8, _2>; // ((1,2),8,2)
using tuple_2d_d_type = tuple<int, int>; // (8,4)
using tuple_3d_d_type = tuple<int, int, int>; // (8,4,2)
using tuple_3h_d_type = tuple<tuple<int, int>, int, int>; // ((1,2),8,2)
using tuple_2d_m_type = tuple<_8, int>; // (8,4)
using tuple_3d_m_type = tuple<int, int, _2>; // (8,4,2)
using tuple_3h_m_type = tuple<tuple<int, _2>, int, int>; // ((1,2),8,2)
tuple_2d_s_type tuple_2d_s;
tuple_3d_s_type tuple_3d_s;
tuple_3h_s_type tuple_3h_s;
tuple_2d_d_type tuple_2d_d(8,4);
tuple_3d_d_type tuple_3d_d(8,4,2);
tuple_3h_d_type tuple_3h_d(tuple<int,int>(1,2),8,2);
tuple_2d_m_type tuple_2d_m(_8{}, 4);
tuple_3d_m_type tuple_3d_m(8,4,_2{});
tuple_3h_m_type tuple_3h_m(tuple<int,_2>(1,_2{}),8,2);
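  // Naming convention used above: *_s tuples are fully static (every mode is an
  // empty integral-constant type), *_d tuples are fully dynamic (one int per
  // mode), and *_m tuples mix the two. The sizeof/is_empty checks that follow
  // fall out of that directly: only dynamic modes occupy storage.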
CUTLASS_TRACE_HOST(tuple_2d_s << (is_static<tuple_2d_s_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_2d_s_type));
ASSERT_TRUE(is_static<tuple_2d_s_type>::value == true);
ASSERT_TRUE(sizeof(tuple_2d_s_type) == 1);
ASSERT_TRUE(std::is_empty<tuple_2d_s_type>::value);
CUTLASS_TRACE_HOST(tuple_3d_s << (is_static<tuple_3d_s_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_3d_s_type));
ASSERT_TRUE(is_static<tuple_3d_s_type>::value == true);
ASSERT_TRUE(sizeof(tuple_3d_s_type) == 1);
ASSERT_TRUE(std::is_empty<tuple_3d_s_type>::value);
CUTLASS_TRACE_HOST(tuple_3h_s << (is_static<tuple_3h_s_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_3h_s_type));
ASSERT_TRUE(is_static<tuple_3h_s_type>::value == true);
ASSERT_TRUE(sizeof(tuple_3h_s_type) == 1);
ASSERT_TRUE(std::is_empty<tuple_3h_s_type>::value);
CUTLASS_TRACE_HOST(tuple_2d_d << (is_static<tuple_2d_d_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_2d_d_type));
ASSERT_TRUE(is_static<tuple_2d_d_type>::value == false);
ASSERT_TRUE(sizeof(tuple_2d_d_type) == 8);
ASSERT_TRUE(!std::is_empty<tuple_2d_d_type>::value);
CUTLASS_TRACE_HOST(tuple_3d_d << (is_static<tuple_3d_d_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_3d_d_type));
ASSERT_TRUE(is_static<tuple_3d_d_type>::value == false);
ASSERT_TRUE(sizeof(tuple_3d_d_type) == 12);
ASSERT_TRUE(!std::is_empty<tuple_3d_d_type>::value);
CUTLASS_TRACE_HOST(tuple_3h_d << (is_static<tuple_3h_d_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_3h_d_type));
ASSERT_TRUE(is_static<tuple_3h_d_type>::value == false);
ASSERT_TRUE(sizeof(tuple_3h_d_type) == 16);
ASSERT_TRUE(!std::is_empty<tuple_3h_d_type>::value);
CUTLASS_TRACE_HOST(tuple_2d_m << (is_static<tuple_2d_m_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_2d_m_type));
ASSERT_TRUE(is_static<tuple_2d_m_type>::value == false);
ASSERT_TRUE(sizeof(tuple_2d_m_type) == 4);
ASSERT_TRUE(!std::is_empty<tuple_2d_m_type>::value);
CUTLASS_TRACE_HOST(tuple_3d_m << (is_static<tuple_3d_m_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_3d_m_type));
ASSERT_TRUE(is_static<tuple_3d_m_type>::value == false);
ASSERT_TRUE(sizeof(tuple_3d_m_type) == 8);
ASSERT_TRUE(!std::is_empty<tuple_3d_m_type>::value);
CUTLASS_TRACE_HOST(tuple_3h_m << (is_static<tuple_3h_m_type>::value ? " Static " : " Dynamic ")
<< "sizeof = " << sizeof(tuple_3h_m_type));
ASSERT_TRUE(is_static<tuple_3h_m_type>::value == false);
ASSERT_TRUE(sizeof(tuple_3h_m_type) == 12);
ASSERT_TRUE(!std::is_empty<tuple_3h_m_type>::value);
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("SIMPLE TUPLE OPS");
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("product(" << tuple_2d_s << ") => " << product(tuple_2d_s));
CUTE_STATIC_ASSERT_V(product(tuple_2d_s) == _32{});
CUTLASS_TRACE_HOST("product(" << tuple_3d_s << ") => " << product(tuple_3d_s));
CUTE_STATIC_ASSERT_V(product(tuple_3d_s) == _64{});
CUTLASS_TRACE_HOST("product(" << tuple_3h_s << ") => " << product(tuple_3h_s));
CUTE_STATIC_ASSERT_V(product(tuple_3h_s) == _32{});
CUTLASS_TRACE_HOST("product(" << tuple_2d_d << ") => " << product(tuple_2d_d));
ASSERT_TRUE(product(tuple_2d_d) == 32);
CUTLASS_TRACE_HOST("product(" << tuple_3d_d << ") => " << product(tuple_3d_d));
ASSERT_TRUE(product(tuple_3d_d) == 64);
CUTLASS_TRACE_HOST("product(" << tuple_3h_d << ") => " << product(tuple_3h_d));
ASSERT_TRUE(product(tuple_3h_d) == 32);
CUTLASS_TRACE_HOST("product(" << tuple_2d_m << ") => " << product(tuple_2d_m));
ASSERT_TRUE(product(tuple_2d_m) == 32);
CUTLASS_TRACE_HOST("product(" << tuple_3d_m << ") => " << product(tuple_3d_m));
ASSERT_TRUE(product(tuple_3d_m) == 64);
CUTLASS_TRACE_HOST("product(" << tuple_3h_m << ") => " << product(tuple_3h_m));
ASSERT_TRUE(product(tuple_3h_m) == 32);
CUTLASS_TRACE_HOST("max(" << tuple_2d_s << ") => " << max(tuple_2d_s));
CUTE_STATIC_ASSERT_V(max(tuple_2d_s) == _8{});
CUTLASS_TRACE_HOST("max(" << tuple_3d_s << ") => " << max(tuple_3d_s));
CUTE_STATIC_ASSERT_V(max(tuple_3d_s) == _8{});
CUTLASS_TRACE_HOST("max(" << tuple_3h_s << ") => " << max(tuple_3h_s));
CUTE_STATIC_ASSERT_V(max(tuple_3h_s) == _8{});
CUTLASS_TRACE_HOST("max(" << tuple_2d_d << ") => " << max(tuple_2d_d));
ASSERT_TRUE(max(tuple_2d_d) == 8);
CUTLASS_TRACE_HOST("max(" << tuple_3d_d << ") => " << max(tuple_3d_d));
ASSERT_TRUE(max(tuple_3d_d) == 8);
CUTLASS_TRACE_HOST("max(" << tuple_3h_d << ") => " << max(tuple_3h_d));
ASSERT_TRUE(max(tuple_3h_d) == 8);
CUTLASS_TRACE_HOST("max(" << tuple_2d_m << ") => " << max(tuple_2d_m));
ASSERT_TRUE(max(tuple_2d_m) == 8);
CUTLASS_TRACE_HOST("max(" << tuple_3d_m << ") => " << max(tuple_3d_m));
ASSERT_TRUE(max(tuple_3d_m) == 8);
CUTLASS_TRACE_HOST("max(" << tuple_3h_m << ") => " << max(tuple_3h_m));
ASSERT_TRUE(max(tuple_3h_m) == 8);
// 2d s|d|m
CUTLASS_TRACE_HOST("inner_product(" << tuple_2d_s << ", " << tuple_2d_s << ") => "
<< inner_product(tuple_2d_s, tuple_2d_s));
CUTE_STATIC_ASSERT_V(inner_product(tuple_2d_s, tuple_2d_s) == Int<80>{});
CUTLASS_TRACE_HOST("inner_product(" << tuple_2d_d << ", " << tuple_2d_d << ") => "
<< inner_product(tuple_2d_d, tuple_2d_d));
ASSERT_TRUE(inner_product(tuple_2d_d, tuple_2d_d) == 80);
CUTLASS_TRACE_HOST("inner_product(" << tuple_2d_m << ", " << tuple_2d_m << ") => "
<< inner_product(tuple_2d_m, tuple_2d_m));
ASSERT_TRUE(inner_product(tuple_2d_m, tuple_2d_m) == 80);
// 3d s|d|m
CUTLASS_TRACE_HOST("inner_product(" << tuple_3d_s << ", " << tuple_3d_s << ") => "
<< inner_product(tuple_3d_s, tuple_3d_s));
CUTE_STATIC_ASSERT_V(inner_product(tuple_3d_s, tuple_3d_s) == Int<84>{});
CUTLASS_TRACE_HOST("inner_product(" << tuple_3d_d << ", " << tuple_3d_d << ") => "
<< inner_product(tuple_3d_d, tuple_3d_d));
ASSERT_TRUE(inner_product(tuple_3d_d, tuple_3d_d) == 84);
CUTLASS_TRACE_HOST("inner_product(" << tuple_3d_m << ", " << tuple_3d_m << ") => "
<< inner_product(tuple_3d_m, tuple_3d_m));
ASSERT_TRUE(inner_product(tuple_3d_m, tuple_3d_m) == 84);
// 3h s|d|m
CUTLASS_TRACE_HOST("inner_product(" << tuple_3h_s << ", " << tuple_3h_s << ") => "
<< inner_product(tuple_3h_s, tuple_3h_s));
CUTE_STATIC_ASSERT_V(inner_product(tuple_3h_s, tuple_3h_s) == Int<73>{});
CUTLASS_TRACE_HOST("inner_product(" << tuple_3h_d << ", " << tuple_3h_d << ") => "
<< inner_product(tuple_3h_d, tuple_3h_d));
ASSERT_TRUE(inner_product(tuple_3h_d, tuple_3h_d) == 73);
CUTLASS_TRACE_HOST("inner_product(" << tuple_3h_m << ", " << tuple_3h_m << ") => "
<< inner_product(tuple_3h_m, tuple_3h_m));
ASSERT_TRUE(inner_product(tuple_3h_m, tuple_3h_m) == 73);
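  // The expected inner products are plain elementwise multiply-accumulates:
  //   (8,4)       . (8,4)       = 64 + 16        = 80
  //   (8,4,2)     . (8,4,2)     = 64 + 16 + 4    = 84
  //   ((1,2),8,2) . ((1,2),8,2) = 1 + 4 + 64 + 4 = 73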
CUTLASS_TRACE_HOST("col_major(" << tuple_2d_s << ") => " << compact_col_major(tuple_2d_s));
CUTE_STATIC_ASSERT_V((compact_col_major(tuple_2d_s) == make_tuple(_1{},_8{})));
CUTLASS_TRACE_HOST("col_major(" << tuple_3d_s << ") => " << compact_col_major(tuple_3d_s));
CUTE_STATIC_ASSERT_V((compact_col_major(tuple_3d_s) == make_tuple(_1{},_8{},_32{})));
CUTLASS_TRACE_HOST("col_major(" << tuple_3h_s << ") => " << compact_col_major(tuple_3h_s));
CUTE_STATIC_ASSERT_V((compact_col_major(tuple_3h_s) == make_tuple(make_tuple(_0{},_1{}),_2{},_16{})));
CUTLASS_TRACE_HOST("col_major(" << tuple_2d_d << ") => " << compact_col_major(tuple_2d_d));
ASSERT_TRUE((compact_col_major(tuple_2d_d) == make_tuple(_1{},8)));
CUTLASS_TRACE_HOST("col_major(" << tuple_3d_d << ") => " << compact_col_major(tuple_3d_d));
ASSERT_TRUE((compact_col_major(tuple_3d_d) == make_tuple(_1{},8,32)));
CUTLASS_TRACE_HOST("col_major(" << tuple_3h_d << ") => " << compact_col_major(tuple_3h_d));
ASSERT_TRUE((compact_col_major(tuple_3h_d) == make_tuple(make_tuple(_1{},1),2,16)));
CUTLASS_TRACE_HOST("col_major(" << tuple_2d_m << ") => " << compact_col_major(tuple_2d_m));
ASSERT_TRUE((compact_col_major(tuple_2d_m) == make_tuple(_1{},_8{})));
CUTLASS_TRACE_HOST("col_major(" << tuple_3d_m << ") => " << compact_col_major(tuple_3d_m));
ASSERT_TRUE((compact_col_major(tuple_3d_m) == make_tuple(_1{},8,32)));
CUTLASS_TRACE_HOST("col_major(" << tuple_3h_m << ") => " << compact_col_major(tuple_3h_m));
ASSERT_TRUE((compact_col_major(tuple_3h_m) == make_tuple(make_tuple(_1{},1),2,16)));
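  // compact_col_major yields exclusive prefix products of the flattened shape:
  // (8,4) -> (1,8) and (8,4,2) -> (1,8,32). In the hierarchical case the static
  // size-1 mode is assigned stride _0 while the equivalent dynamic mode keeps
  // stride 1, which is why the ((1,2),8,2) results above differ in that entry.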
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("SLICING TUPLES");
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = Coord<_2,_3,_4,Coord<_5,_6>>{};
CUTLASS_TRACE_HOST("a = " << a);
CUTLASS_TRACE_HOST("a(1) = " << slice(1, a));
CUTLASS_TRACE_HOST("a(_) = " << slice(_, a));
CUTLASS_TRACE_HOST("a(_,1,_,_) = " << slice(make_coord(_,1,_,_), a));
CUTLASS_TRACE_HOST("a(_,1,_,(_,_)) = " << slice(make_coord(_,1,_,make_coord(_,_)), a));
CUTLASS_TRACE_HOST("a(_,1,_,(_,2)) = " << slice(make_coord(_,1,_,make_coord(_,2)), a));
CUTLASS_TRACE_HOST("a(_,1,_,(1,2)) = " << slice(make_coord(_,1,_,make_coord(1,2)), a));
}
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("DICING TUPLES");
CUTLASS_TRACE_HOST("-------------------------------");
{
auto a = Coord<_2,_3,_4,Coord<_5,_6>>{};
CUTLASS_TRACE_HOST("a = " << a);
CUTLASS_TRACE_HOST("a(1) = " << dice(1, a));
CUTLASS_TRACE_HOST("a(_) = " << dice(_, a));
CUTLASS_TRACE_HOST("a(_,1,_,_) = " << dice(make_coord(_,1,_,_), a));
CUTLASS_TRACE_HOST("a(_,1,_,(_,_)) = " << dice(make_coord(_,1,_,make_coord(_,_)), a));
CUTLASS_TRACE_HOST("a(_,1,_,(_,2)) = " << dice(make_coord(_,1,_,make_coord(_,2)), a));
CUTLASS_TRACE_HOST("a(_,1,_,(1,2)) = " << dice(make_coord(_,1,_,make_coord(1,2)), a));
}
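  // Sketch of the expected convention (traced above rather than asserted): slice
  // keeps the modes of `a` addressed by `_` in the coordinate and drops the rest,
  // while dice does the opposite and keeps the modes addressed by integers.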
}
| test/unit/cute/core/tuple.cpp/0 | {
"file_path": "test/unit/cute/core/tuple.cpp",
"repo_id": "test",
"token_count": 6293
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for the epilogue threadblock PredicatedTileIterator
*/
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
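/// Store kernel used by the tests below: each thread fills its fragment with the
/// value (iter + 1) and writes it through the predicated tile iterator for each of
/// the ThreadMap::Count::kTile tiles. Predication is expected to suppress stores
/// that fall outside `extent`, which is what the host-side footprint check relies on.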
template <typename TileIterator>
__global__ void kernel_store_iterator(
typename TileIterator::Params params,
typename TileIterator::TensorRef ref,
cutlass::MatrixCoord extent) {
TileIterator iterator(params, ref.data(), extent, threadIdx.x, {0, 0});
typename TileIterator::Fragment fragment;
CUTLASS_PRAGMA_NO_UNROLL
for (int iter = 0; iter < TileIterator::ThreadMap::Count::kTile; ++iter) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < TileIterator::Fragment::kElements; ++i) {
typename TileIterator::Element tidx(iter + 1);
fragment[i] = tidx;
}
iterator.store(fragment);
++iterator;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
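/// Checks the footprint left by the store kernel: every element inside `extent`
/// must be nonzero (it was written at least once) and every element outside must
/// still be zero (no out-of-bounds store leaked through the predicates).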
template <typename T, typename Layout>
static bool verify_footprint(cutlass::TensorView<T, Layout> view, cutlass::MatrixCoord extent) {
for (int r = 0; r < view.extent().row(); ++r) {
for (int c = 0; c < view.extent().column(); ++c) {
cutlass::MatrixCoord coord{r, c};
bool within = coord < extent;
if (within) {
if (view.at(coord) == T(0)) {
return false;
}
}
else {
if (view.at(coord) != T(0)) {
return false;
}
}
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_64x64x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
//
// The following tests were used to develop the OutputTileOptimalThreadMap
// metaprogram. The definitions in the disabled blocks of code in this and
// the following tests are hand-written quantities. They are expected to
// match what is defined in the ThreadMap.
//
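  // In the enabled branch, the two OutputTileShape arguments correspond to the
  // Shape and Count of the hand-written definitions in the disabled branch; each
  // lists (column, row, group, cluster, tile) extents in that order.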
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 64>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
64, // column
8, // row
1, // group
1, // cluster
1 // iterations
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
1, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
1, // group
1, // cluster
1 // iterations
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 64};
cutlass::MatrixCoord output_extent{62, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_64x64x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_128x64x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 64;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2, 1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 128>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
64, // column
8, // row
2, // group
1, // cluster
8 // tile
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
2, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
64, // group
1, // cluster
1 // tile
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 64};
cutlass::MatrixCoord output_extent{125, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_128x64x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, tensor_op_128x256x32_64x64x8) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2, 1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<256, 128>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
256, // column
8, // row
2, // group
1, // cluster
8 // tile
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
2, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
64, // group
1, // cluster
1 // tile
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
8, // row
1, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 256};
cutlass::MatrixCoord output_extent{123, 252};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("tensor_op_128x256x32_64x64x8.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_64x64x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 8>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
64, // column
2, // row
4, // group
1, // cluster
8 // iterations
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
1, // row
8, // group
1, // cluster
1 // iterations
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 64};
cutlass::MatrixCoord output_extent{62, 56};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("volta_tensor_op_64x64x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_64x128x32_32x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 128;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<128, 8>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
128, // column
2, // row
2, // group
2, // cluster
8 // iterations
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
1, // row
1, // group
2, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
1, // row
8, // group
32, // cluster
1 // iterations
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
4, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{64, 128};
cutlass::MatrixCoord output_extent{57, 124};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("volta_tensor_op_64x128x32_32x64x4.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, volta_tensor_op_128x256x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<256, 2, 4, 2, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<256, 16>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
256, // column
2, // row
4, // group
2, // cluster
8 // iterations
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
1, // row
2, // group
2, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
1, // row
16, // group
64, // cluster
1 // iterations
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 256};
cutlass::MatrixCoord output_extent{128, 256};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
  if (!passed) {
std::ofstream output("volta_tensor_op_128x256x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
TEST(PredicatedTileIterator, volta_tensor_op_256x128x32_64x64x4) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 4, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{ 256, 128 };
cutlass::MatrixCoord output_extent{ 256, 128 };
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator> <<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
  if (!passed) {
std::ofstream output("volta_tensor_op_256x128x32_64x64x4.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, simt_32x64x8_32x64x1) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 32 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 32;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<64, 1, 4, 1, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<64, 4>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
64, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
2, // column
1, // row
4, // group
1, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
32, // column
1, // row
4, // group
16, // cluster
1 // iterations
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{32, 64};
cutlass::MatrixCoord output_extent{27, 63};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("simt_32x64x8_32x64x1.csv");
output << host_tensor.host_view();
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(PredicatedTileIterator, simt_128x128x8_32x64x1) {
using Layout = cutlass::layout::RowMajor;
using Element = int;
static int const kElementsPerAccess = 32 / cutlass::sizeof_bits<Element>::value;
static int const kThreads = 256;
#if 1
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap <
cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4, 4, 1>,
cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>,
kThreads,
kElementsPerAccess,
cutlass::sizeof_bits<Element>::value
>;
#else
using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<128, 16>,
kThreads,
kElementsPerAccess
>;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
128, // column
1, // row
4, // group
4, // cluster
1 // iterations
>;
using Iterations = cutlass::epilogue::threadblock::OutputTileShape<
2, // column
1, // row
2, // group
4, // cluster
1 // iterations
>;
using Delta = cutlass::epilogue::threadblock::OutputTileShape<
32, // column
1, // row
8, // group
32, // cluster
1 // iterations
>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
4, // row
2, // group
1, // cluster
8 // iterations
>;
using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap<
InternalThreadMap,
Shape,
Iterations,
Delta,
Count
>;
#endif
using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
ThreadMap,
Element
>;
//
// Initialize workspace
//
cutlass::MatrixCoord tensor_extent{128, 128};
cutlass::MatrixCoord output_extent{123, 121};
//
// Configure parameters
//
cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent);
typename PredicatedTileIterator::Params iterator_params(host_tensor.layout());
host_tensor.sync_device();
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>(
iterator_params, host_tensor.device_ref(), output_extent);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result);
//
// Verify results
//
host_tensor.sync_host();
bool passed = verify_footprint(host_tensor.host_view(), output_extent);
EXPECT_TRUE(passed);
if (!passed) {
std::ofstream output("simt_128x128x8_32x64x1.csv");
output << host_tensor.host_view();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/predicated_tile_iterator.cu/0 | {
"file_path": "test/unit/epilogue/threadblock/predicated_tile_iterator.cu",
"repo_id": "test",
"token_count": 10243
} | 58 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for Sm90 f16_f16_f16 cooperative DAG epilogue
EVTDAG: D = beta * C + Graph(relu(alpha * acc + aux) + aux)
DAGEVT: EVT = alpha * acc + C, D = Graph(maximum(EVT + per-row bias, EVT))
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_evt.hpp"
#include "sm90_evt_operations.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
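// Note on the build order used in both tests: the epilogue collective is built
// first so that sizeof(typename CollectiveEpilogue::SharedStorage) can be handed
// to StageCountAutoCarveout, which reserves that shared memory before the
// mainloop stage count is chosen.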
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_EVTDAG) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>;
using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>;
using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombEVTDAG<
EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
bool passed = test::gemm::device::TestAllEVT<Gemm, test::gemm::device::HostEVTDAG<Gemm>>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_DAGEVT) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>;
using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombDAGEVT<
EpilogueDescriptor, AuxStoreDescriptor, cutlass::half_t, float, cutlass::half_t, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
bool passed = test::gemm::device::TestAllEVT<Gemm, test::gemm::device::HostDAGEVT<Gemm>>();
EXPECT_TRUE(passed);
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu",
"repo_id": "test",
"token_count": 2633
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Testbed for the device-wide Symm interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/symm.h"
#include "cutlass/util/reference/host/symm_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Symm>
struct TestbedSymmUniversal {
using ElementA = typename Symm::ElementA;
using ElementB = typename Symm::ElementB;
using ElementC = typename Symm::ElementC;
using ElementAccumulator = typename Symm::ElementAccumulator;
using ElementCompute = typename Symm::SymmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Symm::ElementA, typename Symm::LayoutA> tensor_A;
cutlass::HostTensor<typename Symm::ElementB, typename Symm::LayoutB> tensor_B;
cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> tensor_C;
cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> tensor_D;
cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> reference_D;
//
// Methods
//
TestbedSymmUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Symm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Input distribution not implemented";
return false;
}
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_symmetric_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Symm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillSymmetricRandomUniform(
view, seed, Symm::kFillModeA, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillSymmetricRandomGaussian(
view, seed, Symm::kFillModeA, 0, 0.5, mantissa_in_bits);
}
else {
EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the Symm workspace
//
if (Symm::kSideModeA == cutlass::SideMode::kLeft) {
tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m()));
}
else if (Symm::kSideModeA == cutlass::SideMode::kRight) {
tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n()));
}
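    // A is the symmetric operand and therefore square; its dimension matches the
    // side it multiplies from: m x m on the left, n x n on the right.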
tensor_B.resize(problem_size.mn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename Symm::ElementA>::bits));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits<typename Symm::ElementB>::bits));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename Symm::ElementC>::bits));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Symm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Symm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Symm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
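    // Compare using a relative L2 error metric; the tolerance is tied to the
    // mantissa width of the operand element type.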
double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view());
bool passed = l2_norm < cutlass::MantissaInBits<typename Symm::ElementA>::error;
return passed;
}
  /// Verifies the device result against a host Symm reference
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
using HostReference = typename cutlass::platform::conditional<
(cutlass::platform::is_same<typename Symm::ElementC,
cutlass::complex<double>
>::value ||
cutlass::platform::is_same<typename Symm::ElementC,
cutlass::complex<float>
>::value
),
cutlass::reference::host::SymmComplex<
typename Symm::ElementA, typename Symm::LayoutA,
Symm::kSideModeA, Symm::kFillModeA,
typename Symm::ElementB, typename Symm::LayoutB,
typename Symm::ElementC, typename Symm::LayoutC,
ElementCompute,
ElementAccumulator,
Symm::kBlasMode>,
cutlass::reference::host::Symm<
typename Symm::ElementA, typename Symm::LayoutA,
Symm::kSideModeA, Symm::kFillModeA,
typename Symm::ElementB, typename Symm::LayoutB,
typename Symm::ElementC, typename Symm::LayoutC,
ElementCompute,
ElementAccumulator>
>::type;
HostReference reference_symm;
reference_symm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Symm::SymmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0
std::cout << "[TestbedSymmUniversal::run()] problem(m, n, k): " << problem_size
<< " alpha: " << ElementCompute(alpha)
<< " beta: " << ElementCompute(beta) << std::endl;
#endif
this->initialize(problem_size);
//
// Initialize the Symm operator
//
    int batch_stride_A = 0;
    if (Symm::kSideModeA == cutlass::SideMode::kLeft)
      batch_stride_A = problem_size.m() * problem_size.m();
    else if (Symm::kSideModeA == cutlass::SideMode::kRight)
      batch_stride_A = problem_size.n() * problem_size.n();
typename Symm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
batch_stride_A,
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0)
};
Symm symm_op;
size_t workspace_size = Symm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = symm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the Symm
//
status = symm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
//if (true) {
if (!passed) {
std::stringstream fname;
fname << "error_"
<< (Symm::kBlasMode == cutlass::BlasMode::kSymmetric ? "symm_" : "hemm_" )
<< "device_"
<< "fill_mode_a_"
<< (Symm::kSideModeA == cutlass::SideMode::kLeft ? "leftside_" :
(Symm::kSideModeA == cutlass::SideMode::kRight ? "rightside_" : "invalid_"))
<< (Symm::kFillModeA == cutlass::FillMode::kLower ? "lower_" :
(Symm::kFillModeA == cutlass::FillMode::kUpper ? "upper_" : "invalid_"))
<< "mnk_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Symm::ThreadblockShape::kM << "x"
<< Symm::ThreadblockShape::kN << "x"
<< Symm::ThreadblockShape::kK << "_"
<< Symm::WarpShape::kM << "x"
<< Symm::WarpShape::kN << "x"
<< Symm::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "alpha: " << ElementCompute(alpha) << "\n"
<< "beta: " << ElementCompute(beta) << "\n"
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nD reference:\n" << reference_D.host_view() << "\n"
<< "\nD computed:\n" << tensor_D.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Symm>
bool TestsymmUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedSymmUniversal<Symm> testbed;
using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
return passed;
}
template <typename Symm>
bool TestAllSymmUniversal() {
bool passed = true;
int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Symm::ElementA>::value);
int const kAlignment = cutlass::platform::is_same<
typename Symm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Symm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Symm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = kAlignmentM;
int const kAlignmentK = cutlass::platform::is_same<typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Symm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Symm::LayoutA, cutlass::layout::RowMajor>::value
? 4 : kAlignment;
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_m[] = {
kAlignmentK,
Symm::ThreadblockShape::kK * Symm::kStages - kAlignmentK,
Symm::ThreadblockShape::kK * Symm::kStages * 3 - kAlignmentK
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
  int batch_counts[] = {      // may be interpreted as batch count or split-K slices
1 // Just running one batch for now (removing 2, 3, 5, 7)
};
double problem_alpha[] = {
1.0, 3.0
};
double problem_beta[] = {
0, 2.0
};
using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
int k = 0;
if (Symm::kSideModeA == cutlass::SideMode::kLeft)
k = m;
else if (Symm::kSideModeA == cutlass::SideMode::kRight)
k = n;
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
#if 0
// skip very small K problems
if (k / batch_count < 2 * Symm::ThreadblockShape::kK) {
continue;
}
#endif
}
cutlass::gemm::GemmCoord problem_size(m, n, k);
TestbedSymmUniversal<Symm> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
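//
// Illustrative usage (a sketch only, not part of the testbed): unit tests pass a fully
// specialized device-level Symm type to the helpers above. The `ExampleSymm` alias below is
// hypothetical; real tests spell out every cutlass::gemm::device::Symm template argument for
// the target architecture.
//
#if 0
TEST(SM80_Device_Symm_example, TestAllSymmUniversal) {
  using ExampleSymm = cutlass::gemm::device::Symm<
    /* element types, layouts, side/fill modes, tile shapes, epilogue, ... */ >;
  EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal<ExampleSymm>());
}
#endif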
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_symm_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_symm_universal.h",
"repo_id": "test",
"token_count": 8569
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#pragma once
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/layout/vector.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
namespace test {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread-level matrix multiply-accumulate
template <typename Mma>
void kernel(
typename Mma::ElementC *D,
typename Mma::ElementA const *A,
typename Mma::ElementB const *B,
typename Mma::ElementC const *C) {
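  // The raw pointers are reinterpreted as whole-tile fragments: each buffer is expected to hold
  // exactly Shape::kMK / Shape::kKN / Shape::kMN contiguous elements in the fragment ordering
  // that the thread-level Mma operator consumes.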
auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D);
auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A);
auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B);
auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C);
Mma mma;
auto a = *ptr_A;
auto b = *ptr_B;
auto c = *ptr_C;
using Btype = typename Mma::ElementB;
cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d;
mma(d, a, b, c);
*ptr_D = d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC
>
struct Testbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = cutlass::gemm::thread::Mma<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC
>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed() {
tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK), false);
tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN), false);
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Runs the test
bool run() {
//
// initialize device memory
//
cutlass::reference::host::detail::RandomUniformFunc< ElementA > tfill_rand_func(
0, // seed
10, // max
0, // min
0); // bits after decimal
cutlass::reference::host::detail::TensorFillRandomUniformFunc< ElementA, LayoutA > tfill_rand(
tensor_A.host_view(),
tfill_rand_func);
for (auto i=0; i< Shape::kM; i++)
for (auto j=0; j< Shape::kK; j++)
tfill_rand(cutlass::make_Coord(i,j));
cutlass::reference::host::BlockFillSequential(
tensor_B.host_data(),
tensor_B.capacity(),
ElementB(1),
ElementB(2)
);
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
// Host side call
kernel<Mma>(
tensor_D_computed.host_data(),
tensor_A.host_data(),
tensor_B.host_data(),
tensor_C.host_data());
//
// Reference implementation
//
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, Shape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed)
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
return passed;
}
};
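// Illustrative usage (a sketch only, not part of the testbed): a unit test instantiates the
// testbed with a thread-level GEMM shape and element/layout types, then calls run(). The shape
// and types below are examples, not the only supported configuration.
#if 0
TEST(Thread_Gemm_Host, example_8x8x1_f32) {
  test::gemm::thread::Testbed<
    cutlass::gemm::GemmShape<8, 8, 1>,
    float, cutlass::layout::ColumnMajor,
    float, cutlass::layout::ColumnMajor,
    float, cutlass::layout::ColumnMajor
  > testbed;
  EXPECT_TRUE(testbed.run());
}
#endif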
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace test
| test/unit/gemm/thread/host/testbed_host.h/0 | {
"file_path": "test/unit/gemm/thread/host/testbed_host.h",
"repo_id": "test",
"token_count": 2631
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit testbed for kernel-level GEMM
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_types.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_mma(cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ typename Mma::SharedStorage shared_storage;
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
// Construct thread-scoped matrix multiply
Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x);
typename Mma::FragmentC accum;
accum.clear();
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
// Output results
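  // Each warp stores its accumulator tile; warps are rastered in column-major order over the
  // threadblock tile, so (warp_id % WarpCount::kM) selects the warp row and
  // (warp_id / WarpCount::kM) selects the warp column.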
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
iterator_C.store(accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename MmaCore_,
/// Number of stages
int Stages = 2>
struct Testbed {
/// Threadblock-level GEMM implementation
using MmaCore = MmaCore_;
using ThreadblockShape = typename MmaCore::Shape;
using WarpShape = typename MmaCore::WarpShape;
using InstructionShape = typename MmaCore::InstructionShape;
using ElementA = typename MmaCore::ElementA;
using LayoutA = typename MmaCore::LayoutA;
using ElementB = typename MmaCore::ElementB;
using LayoutB = typename MmaCore::LayoutB;
using ElementC = typename MmaCore::ElementC;
using LayoutC = typename MmaCore::LayoutC;
static const int kStages = Stages;
// Define iterators over tiles from the A operand
static const bool use_idp4a = cutlass::platform::is_same<ElementA, int8_t>::value &&
cutlass::platform::is_same<ElementB, int8_t>::value &&
cutlass::platform::is_same<typename MmaCore::OperatorClass, cutlass::arch::OpClassSimt>::value;
static const bool transposeA = cutlass::platform::is_same< LayoutA, cutlass::layout::ColumnMajor >::value;
static const bool transposeB = cutlass::platform::is_same< LayoutB, cutlass::layout::RowMajor >::value;
using IteratorA = typename cutlass::platform::conditional< use_idp4a,
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA> ,
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA>
>::type;
// Define iterators over tiles from the B operand
using IteratorB = typename cutlass::platform::conditional< use_idp4a,
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB> ,
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB>
>::type;
// Define MmaPipeline Single Stage
using MmaPipelineSingleStage = cutlass::gemm::threadblock::MmaSingleStage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC,
typename MmaCore::MmaPolicy>;
// Define MmaPipeline Two Stages
using MmaPipelineTwoStages = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC,
typename MmaCore::MmaPolicy>;
// Define the threadblock-scoped pipelined matrix multiply (Select between Single vs. Two stages)
using Mma = typename cutlass::platform::conditional<(kStages==1), MmaPipelineSingleStage, MmaPipelineTwoStages>::type;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> matrix_A;
cutlass::HostTensor<ElementB, LayoutB> matrix_B;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
cutlass::gemm::GemmCoord problem_size;
float alpha, beta;
//
// Methods
//
/// Allocates workspace in device memory
Testbed(int m, int n, int k, float alpha_, float beta_)
: problem_size(m, n, k), alpha(alpha_), beta(beta_) {
matrix_A.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
}
bool sufficient() {
return true;
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view());
cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
test::gemm::threadblock::kernel_mma<Mma><<<grid, block>>>(
problem_size, params_A, matrix_A.device_ref(), params_B,
matrix_B.device_ref(), matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0));
//
// Check error code
//
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result) << " on device " << GetCudaDevice();
matrix_C_computed.sync_host();
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
typename MmaCore::Operator>
reference_gemm;
reference_gemm(
problem_size, ElementC(alpha), matrix_A.host_view(),
matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view());
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(), matrix_C_reference.host_view());
EXPECT_TRUE(passed) << "Failed on device " << GetCudaDevice();
if (!passed) {
std::ofstream output("mma_pipelined_testbed_errors.txt");
output
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
return passed;
}
};
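// Illustrative usage (a sketch only, not part of the testbed): the launch configuration is
// derived from the threadblock shape and warp partitioning of the selected MmaCore. The
// DefaultMmaCore arguments are omitted because they depend on the target architecture, and the
// snippet is assumed to live inside a TEST body.
#if 0
using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
  /* threadblock shape, warp shape, instruction shape, element types, layouts, op class */ >;
// 32 threads per warp; one warp per WarpShape tile of the threadblock tile. The cores exercised
// by this testbed do not partition the K dimension across warps.
int const warp_count =
    (MmaCore::Shape::kM / MmaCore::WarpShape::kM) *
    (MmaCore::Shape::kN / MmaCore::WarpShape::kN);
dim3 grid(1, 1);                 // single threadblock covering one output tile
dim3 block(32, warp_count, 1);   // threadIdx.x = lane, threadIdx.y = warp
test::gemm::threadblock::Testbed<MmaCore, 2>(
    MmaCore::Shape::kM, MmaCore::Shape::kN, MmaCore::Shape::kK,
    /*alpha=*/1.0f, /*beta=*/0.0f)
    .run(grid, block);
#endif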
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_pipelined_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_pipelined_testbed.h",
"repo_id": "test",
"token_count": 5385
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#pragma once
#include "cutlass/array.h"
namespace test {
namespace nvrtc {
namespace kernel {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread-level matrix multiply-accumulate
template <typename Mma>
__global__ void testbed_kernel(
typename Mma::ElementC *D,
typename Mma::ElementA const *A,
typename Mma::ElementB const *B,
typename Mma::ElementC const *C) {
auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D);
auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A);
auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B);
auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C);
Mma mma;
auto a = *ptr_A;
auto b = *ptr_B;
auto c = *ptr_C;
cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d;
mma(d, a, b, c);
*ptr_D = d;
}
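// Illustrative host-side sketch (not part of this header, and requiring <nvrtc.h>): the kernel
// above is intended to be compiled at runtime with NVRTC. The source string, kernel name
// expression, and compile options below are examples only; the real NVRTC tests supply their own
// headers, a concrete Mma instantiation, and architecture flags, then load the resulting PTX
// through the CUDA driver API.
#if 0
nvrtcProgram program;
char const *source =
    "#include \"test/unit/nvrtc/kernel/thread/testbed_kernel.h\"\n";
nvrtcCreateProgram(&program, source, "testbed_kernel.cu", 0, nullptr, nullptr);
// Registering the name expression lets nvrtcGetLoweredName() report the mangled symbol of the
// requested template instantiation after compilation. MmaInstantiation is a placeholder.
char const *kernel_name =
    "test::nvrtc::kernel::thread::testbed_kernel<MmaInstantiation>";
nvrtcAddNameExpression(program, kernel_name);
char const *options[] = {"--gpu-architecture=compute_80", "--std=c++17"};
nvrtcResult compile_result = nvrtcCompileProgram(program, 2, options);
// On success: nvrtcGetPTXSize()/nvrtcGetPTX() retrieve the PTX and nvrtcGetLoweredName() the
// mangled kernel name, which are then handed to cuModuleLoadDataEx()/cuLaunchKernel().
#endif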
}
}
}
}
| test/unit/nvrtc/kernel/thread/testbed_kernel.h/0 | {
"file_path": "test/unit/nvrtc/kernel/thread/testbed_kernel.h",
"repo_id": "test",
"token_count": 861
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
///
template <typename Iterator>
__global__ void kernel_gemm_threadblock_tensor_op_multiplicand_store(
typename Iterator::TensorRef ref_output,
typename Iterator::Element *input) {
// Construct fragment
typename Iterator::Fragment frag;
frag.clear();
// each thread loads a fragment
using AccessType = cutlass::Array<typename Iterator::Element, Iterator::ThreadMap::kElementsPerAccess>;
int const kElementsPerAccess = Iterator::ThreadMap::kElementsPerAccess;
int stride = Iterator::Shape::kContiguous;
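  // The warp-raked thread map arranges each warp as 8 threads along the contiguous dimension and
  // 4 along the strided dimension; whole warps then partition the strided extent, which yields
  // the per-thread starting offset computed below.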
int warp_id = (threadIdx.x / 32);
int lane_id = (threadIdx.x % 32);
input += (lane_id % 8) * kElementsPerAccess + (lane_id / 8) * stride;
input += (warp_id * Iterator::Shape::kStrided / Iterator::ThreadMap::Detail::kWarpCount) * stride;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Iterator::ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Iterator::ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < Iterator::ThreadMap::kElementsPerAccess; ++v) {
frag[v + Iterator::ThreadMap::kElementsPerAccess * (c + s * Iterator::ThreadMap::Iterations::kContiguous)] =
input[v + c * 64 + s * Iterator::ThreadMap::Delta::kStrided * stride];
}
}
}
// Use iterator to store results
Iterator iter(ref_output, threadIdx.x);
iter.store(frag);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Simple test environment
template <
typename Shape_,
int WarpCount
>
class MultiplicandTileIteratorTestbed {
public:
//
// Define iterator
//
using Shape = Shape_;
using Element = cutlass::half_t;
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
static int const kAdvanceRank = 1;
static int const kThreads = 32 * WarpCount;
using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap<
Shape,
kThreads,
cutlass::layout::PitchLinearShape<8, 4>,
128 / cutlass::sizeof_bits<Element>::value
>;
using Iterator = cutlass::transform::threadblock::RegularTileIterator<
Shape, Element, Layout, kAdvanceRank, ThreadMap
>;
public:
//
// Members
//
cutlass::HostTensor<Element, Layout> destination_tensor;
cutlass::HostTensor<Element, cutlass::layout::PitchLinear> source_tensor;
public:
MultiplicandTileIteratorTestbed():
destination_tensor({Shape::kContiguous, Shape::kStrided}),
source_tensor({Shape::kContiguous, Shape::kStrided}) {
}
bool run() {
cutlass::reference::host::BlockFillSequential(
source_tensor.host_data(),
source_tensor.capacity()
);
cutlass::reference::host::BlockFillSequential(
destination_tensor.host_data(),
destination_tensor.capacity(),
Element(0),
Element(0)
);
//
// Launch kernel
//
dim3 grid(1,1);
dim3 block(kThreads, 1);
destination_tensor.sync_device();
source_tensor.sync_device();
test::gemm::threadblock::kernel_gemm_threadblock_tensor_op_multiplicand_store<Iterator><<<
grid, block
>>>(
destination_tensor.device_ref(),
source_tensor.device_data()
);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " - CUDA ERROR: " << cudaGetErrorString(result);
destination_tensor.sync_host();
//
// Verify
//
// Verify that its contents match the destination
int errors = 0;
for (int s = 0; s < Shape::kStrided; ++s) {
for (int c = 0; c < Shape::kContiguous; ++c) {
if (errors >= 10) {
break;
}
Element expected = source_tensor.at({c, s});
Element got = destination_tensor.at({c, s});
bool passed = (expected == got);
if (!passed) {
++errors;
}
}
}
EXPECT_EQ(errors, 0)
<< source_tensor.host_view() << "\n\n" << destination_tensor.host_view() << std::endl;
return !errors;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x8_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 8>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 16>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w2) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 16>, 2>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x8_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<128, 8>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x32_w4) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<64, 32>, 4>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w1) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<128, 32>, 1>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w4) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<128, 32>, 4>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w4) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<256, 32>, 4>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w8) {
test::gemm::threadblock::MultiplicandTileIteratorTestbed<
cutlass::layout::PitchLinearShape<256, 32>, 8>().run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu/0 | {
"file_path": "test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu",
"repo_id": "test",
"token_count": 3054
} | 64 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all CONV operation kinds in CUTLASS Library.
*/
#pragma once
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"
#include "cutlass/conv/kernel/default_depthwise_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/core_io.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class Conv2dOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = Operator::kIteratorAlgorithm;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
using OperatorArguments = typename Operator::Arguments;
protected:
///
ConvDescription description_;
public:
/// Constructor
Conv2dOperationBase(char const *name = "unknown_conv2d") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kConv2d;
description_.conv_dim = Operator::kConvDim;
description_.iterator_algorithm = IteratorAlgorithmMap<Operator::kIteratorAlgorithm>::kId;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::UnderlyingKernel::WarpCount::kM,
Operator::UnderlyingKernel::WarpCount::kN,
Operator::UnderlyingKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::MathOperator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.C = make_TensorDescription<ElementC, LayoutC>();
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
// TODO: Add split k mode Serial and parallel to convolutions
// description_.split_k_mode = Operator::kSplitK ? SplitKMode::kSerial : SplitKMode::kNone;
}
/// Returns the description of the GEMM operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conv2d library operation class for cutlass profiler
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class Conv2dOperation : public Conv2dOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
Conv2dOperation(char const *name = "unknown_conv2d_fprop") : Conv2dOperationBase<Operator_>(name) {
this->description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
Conv2dConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A =
{
nullptr,
LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_B =
{
nullptr,
LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_C =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_D =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.split_k_mode = configuration->split_k_mode;
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
ConvArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA *>(const_cast<void *>(arguments->A)));
operator_args.ref_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->B)));
operator_args.ref_C.reset(static_cast<ElementC *>(const_cast<void *>(arguments->C)));
operator_args.ref_D.reset(static_cast<ElementC *>(const_cast<void *>(arguments->D)));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
Conv2dConfiguration const *configuration =
static_cast<Conv2dConfiguration const *>(configuration_ptr);
ConvArguments const *arguments =
static_cast<ConvArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<ConvArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args, device_workspace);
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->run(stream);
}
/// Call print_operator_args from the Conv2dOperation::initialize()
// to dump arguments passed on to cutlass operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
std::cout << "Conv2dOperation::OperatorArguments" << std::endl
<< " problem_size:" << std::endl
<< operator_args.problem_size << std::endl
<< " split_k_mode: "
<< (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial" : "parallel") << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.output_op.alpha << ", "
<< operator_args.output_op.beta << std::endl
<< " ref_A (ptr, {stride}): "
<< operator_args.ref_A.data() << ", {"
<< operator_args.ref_A.stride(0) << ", "
<< operator_args.ref_A.stride(1) << ", "
<< operator_args.ref_A.stride(2) << "}" << std::endl
<< " ref_B (ptr, {stride}): "
<< operator_args.ref_B.data() << ", {"
<< operator_args.ref_B.stride(0) << ", "
<< operator_args.ref_B.stride(1) << ", "
<< operator_args.ref_B.stride(2) << "}" << std::endl
<< " ref_C (ptr, {stride}): "
<< operator_args.ref_C.data() << ", {"
<< operator_args.ref_C.stride(0) << ", "
<< operator_args.ref_C.stride(1) << ", "
<< operator_args.ref_C.stride(2) << "}" << std::endl
<< " ref_D (ptr, {stride}): "
<< operator_args.ref_D.data() << ", {"
<< operator_args.ref_D.stride(0) << ", "
<< operator_args.ref_D.stride(1) << ", "
<< operator_args.ref_D.stride(2) << "}" << std::endl;
}
};
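//
// Illustrative call sequence (a sketch only, not part of the library): a client that obtains a
// Conv2dOperation through the cutlass::library manifest typically drives it as below. The
// configuration and argument structures are assumed to be fully populated by the caller, and the
// device workspace helper requires "cutlass/util/device_memory.h".
//
#if 0
cutlass::Status run_conv2d_operation(
  Operation const *op,                 // obtained from the library manifest
  Conv2dConfiguration const &config,   // problem size, layouts, split-K mode
  ConvArguments const &args,           // device pointers, alpha/beta
  cudaStream_t stream) {

  std::vector<uint8_t> host_workspace(op->get_host_workspace_size(&config));
  cutlass::device_memory::allocation<uint8_t> device_workspace(
      op->get_device_workspace_size(&config, &args));

  cutlass::Status status = op->can_implement(&config, &args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  status = op->initialize(&config, host_workspace.data(), device_workspace.get(), stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  return op->run(&args, host_workspace.data(), device_workspace.get(), stream);
}
#endif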
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// DirectConv2d library operation class for cutlass profiler
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class DirectConv2dOperation : public Conv2dOperation<Operator_> {
public:
using Operator = Operator_;
using Base = Conv2dOperation<Operator_>;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
  DirectConv2dOperation(char const *name = "unknown_direct_conv2d_fprop") : Conv2dOperation<Operator_>(name) {
this->description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
Conv2dConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A =
{
nullptr,
LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_B =
{
nullptr,
LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_reordered_B =
{
nullptr,
LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_C =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.ref_D =
{
nullptr,
LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size))
};
operator_args.split_k_mode = configuration->split_k_mode;
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
ConvArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output_op = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA *>(const_cast<void *>(arguments->A)));
operator_args.ref_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->B)));
operator_args.ref_C.reset(static_cast<ElementC *>(const_cast<void *>(arguments->C)));
operator_args.ref_D.reset(static_cast<ElementC *>(const_cast<void *>(arguments->D)));
    operator_args.ref_reordered_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->reordered_B)));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
Conv2dConfiguration const *configuration =
static_cast<Conv2dConfiguration const *>(configuration_ptr);
ConvArguments const *arguments =
static_cast<ConvArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<Conv2dConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<ConvArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args, device_workspace);
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run library::Conv2dOperation" << std::endl;
//print_operator_args(args);
return op->run(stream);
}
  /// Call print_operator_args from Conv2dOperation::initialize() to dump the
  /// arguments passed to the CUTLASS operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
std::cout << "Conv2dOperation::OperatorArguments" << std::endl
<< " problem_size:" << std::endl
<< operator_args.problem_size << std::endl
<< " split_k_mode: "
<< (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial" : "parallel") << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.output_op.alpha << ", "
<< operator_args.output_op.beta << std::endl
<< " ref_A (ptr, {stride}): "
<< operator_args.ref_A.data() << ", {"
<< operator_args.ref_A.stride(0) << ", "
<< operator_args.ref_A.stride(1) << ", "
<< operator_args.ref_A.stride(2) << "}" << std::endl
<< " ref_B (ptr, {stride}): "
<< operator_args.ref_B.data() << ", {"
<< operator_args.ref_B.stride(0) << ", "
<< operator_args.ref_B.stride(1) << ", "
<< operator_args.ref_B.stride(2) << "}" << std::endl
<< " ref_C (ptr, {stride}): "
<< operator_args.ref_C.data() << ", {"
<< operator_args.ref_C.stride(0) << ", "
<< operator_args.ref_C.stride(1) << ", "
<< operator_args.ref_C.stride(2) << "}" << std::endl
<< " ref_D (ptr, {stride}): "
<< operator_args.ref_D.data() << ", {"
<< operator_args.ref_D.stride(0) << ", "
<< operator_args.ref_D.stride(1) << ", "
<< operator_args.ref_D.stride(2) << "}" << std::endl;
}
};
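// Illustrative usage sketch (not part of the class above): the host-side call sequence a
// client typically follows for an operation of this kind. The variables `op` (an operation
// pointer obtained from the manifest), `config` (a populated Conv2dConfiguration),
// `conv_args` (a populated ConvArguments), and `stream` are hypothetical placeholders
// introduced only for this example.
#if 0
std::vector<uint8_t> host_workspace(op->get_host_workspace_size(&config));

void *device_workspace = nullptr;
cudaMalloc(&device_workspace, op->get_device_workspace_size(&config, &conv_args));

Status status = op->can_implement(&config, &conv_args);
if (status == Status::kSuccess) {
  // Placement-constructs the underlying kernel object in the host workspace
  status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
}
if (status == Status::kSuccess) {
  // Binds run-time pointers/scalars and launches the kernel
  status = op->run(&conv_args, host_workspace.data(), device_workspace, stream);
}
cudaFree(device_workspace);
#endif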
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/conv2d_operation.h/0 | {
"file_path": "tools/library/src/conv2d_operation.h",
"repo_id": "tools",
"token_count": 8020
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all CONV operation kinds in CUTLASS Library
*/
#pragma once
#include <iostream>
#include <sstream>
#include <cstring>
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/util.h"
#include "library_internal.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
Provider kProvider,
cutlass::conv::Operator ConvolutionalOperator,
int ConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
struct ConvReferenceDispatcher;
/// Dispatcher for Conv2d (partially specialized for kConvDim == 2)
template <
Provider kProvider,
cutlass::conv::Operator kConvolutionalOperator,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator,
typename ConvertOp,
typename InnerProductOp
>
struct ConvReferenceDispatcher<
kProvider,
kConvolutionalOperator,
2,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp> {
static Status dispatch(
void const *configuration,
ElementA *ptr_A,
ElementB *ptr_B,
ElementC *ptr_C,
ElementC *ptr_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr
) {
Conv2dConfiguration const &config =
*static_cast<Conv2dConfiguration const *>(configuration);
    // TODO: generalize this code; it currently assumes NHWC layouts.
layout::TensorNHWC layout_a;
layout::TensorNHWC layout_b;
layout::TensorNHWC layout_c;
layout_a.stride() =
make_Coord(int32_t(config.stride_a[0]),
int32_t(config.stride_a[1]),
int32_t(config.stride_a[2]));
layout_b.stride() =
make_Coord(int32_t(config.stride_b[0]),
int32_t(config.stride_b[1]),
int32_t(config.stride_b[2]));
layout_c.stride() =
make_Coord(int32_t(config.stride_c[0]),
int32_t(config.stride_c[1]),
int32_t(config.stride_c[2]));
if (kProvider == Provider::kReferenceHost) {
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
        ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementC,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, layout_a},
{ptr_B, layout_b},
{ptr_C, layout_c},
{ptr_D, layout_c},
alpha,
beta
);
return Status::kSuccess;
}
else if (kProvider == Provider::kReferenceDevice) {
return cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, layout_a},
{ptr_B, layout_b},
{ptr_C, layout_c},
{ptr_D, layout_c},
alpha,
beta,
stream
);
}
return Status::kErrorNotSupported;
}
};
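// Illustrative sketch of how this partial specialization is selected and invoked. The
// element/layout choices and the `config`/pointer variables below are hypothetical; in
// this file the call is made from ConvReferenceOperation::run() with the
// Conv2dConfiguration stored in the host workspace and passed type-erased.
#if 0
using Dispatcher = detail::ConvReferenceDispatcher<
  Provider::kReferenceHost,
  cutlass::conv::Operator::kFprop,
  2,                                               // ConvDim == 2 selects this specialization
  cutlass::half_t, cutlass::layout::TensorNHWC,    // A
  cutlass::half_t, cutlass::layout::TensorNHWC,    // B
  float,           cutlass::layout::TensorNHWC,    // C / D
  float                                            // ElementCompute (accumulator defaults to this)
>;

Status status = Dispatcher::dispatch(
  &config,                 // Conv2dConfiguration, received as void const *
  ptr_A, ptr_B, ptr_C, ptr_D,
  /*alpha=*/1.0f, /*beta=*/0.0f);
#endif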
/// Dispatcher for Conv3d (partially specialized for kConvDim == 3)
template <
Provider kProvider,
cutlass::conv::Operator kConvolutionalOperator,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator,
typename ConvertOp,
typename InnerProductOp
>
struct ConvReferenceDispatcher<
kProvider,
kConvolutionalOperator,
3,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp> {
static Status dispatch(
void const *configuration,
ElementA *ptr_A,
ElementB *ptr_B,
ElementC *ptr_C,
ElementC *ptr_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr
) {
Conv3dConfiguration const &config =
*static_cast<Conv3dConfiguration const *>(configuration);
ConvKind const conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
if (kProvider == Provider::kReferenceHost) {
cutlass::reference::host::Conv3d<
ElementA,
LayoutA,
ElementB,
LayoutB,
        ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, config.layout_a(conv_kind)},
{ptr_B, config.layout_b(conv_kind)},
{ptr_C, config.layout_c(conv_kind)},
{ptr_D, config.layout_c(conv_kind)},
alpha,
beta
);
return Status::kSuccess;
}
else if (kProvider == Provider::kReferenceDevice) {
return cutlass::reference::device::Conv3d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>(
kConvolutionalOperator,
config.problem_size,
{ptr_A, config.layout_a(conv_kind)},
{ptr_B, config.layout_b(conv_kind)},
{ptr_C, config.layout_c(conv_kind)},
{ptr_D, config.layout_c(conv_kind)},
alpha,
beta,
stream
);
}
return Status::kErrorNotSupported;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
Provider Provider_,
cutlass::conv::Operator ConvolutionalOperator,
int ConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
class ConvReferenceOperation : public Operation {
public:
static Provider const kProvider = Provider_;
static cutlass::conv::Operator const kConvolutionalOperator = ConvolutionalOperator;
static int const kConvDim = ConvDim;
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ConvertOp = ConvertOp_;
using InnerProductOp = InnerProductOp_;
protected:
/// Storage for the name string
std::string name_;
///
ConvDescription description_;
public:
/// Constructor
ConvReferenceOperation() {
// Basic information
description_.provider = kProvider;
description_.kind = (kConvDim == 2 ? OperationKind::kConv2d : OperationKind::kConv3d);
description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId;
description_.conv_dim = kConvDim;
// Tensor description
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.C = make_TensorDescription<ElementC, LayoutC>();
// Epilogue compute and accumulator type description
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
// Iterator algorithm for convolution reference
description_.iterator_algorithm = IteratorAlgorithmID::kNone;
// Compute capability for convolution reference
description_.tile_description.minimum_compute_capability =
(kProvider == Provider::kReferenceDevice ? 50 : 0);
description_.tile_description.maximum_compute_capability = 1024;
// Procedural name
std::stringstream ss;
ss << "conv" << kConvDim << "d_" << to_string(description_.conv_kind)
<< "_reference_" << to_string(description_.provider)
<< "_" << to_string(description_.A.element) << to_string(description_.A.layout)
<< "_" << to_string(description_.B.element) << to_string(description_.B.layout)
<< "_" << to_string(description_.C.element) << to_string(description_.C.layout)
<< "_" << to_string(description_.tile_description.math_instruction.element_accumulator);
name_ = ss.str();
description_.name = name_.c_str();
// Epilogue compute and accumulator type description
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
}
  /// Returns the description of the convolution operation
virtual OperationDescription const & description() const {
return description_;
}
virtual Status can_implement(
void const *configuration,
void const *arguments) const {
return Status::kSuccess;
}
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
switch (kConvDim) {
case 2:
return sizeof(Conv2dConfiguration);
case 3:
return sizeof(Conv3dConfiguration);
default:
break;
}
return 0;
}
virtual uint64_t get_device_workspace_size(
void const *configuration,
void const *arguments = nullptr) const {
return 0;
}
virtual Status initialize(
void const *configuration,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration));
return Status::kSuccess;
}
virtual Status run(
void const *arguments,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
ConvArguments const &args = *static_cast<ConvArguments const *>(arguments);
ElementCompute alpha;
ElementCompute beta;
alpha = *static_cast<ElementCompute const *>(args.alpha);
beta = *static_cast<ElementCompute const *>(args.beta);
// TODO - respect pointer mode
// Invoke 2D or 3D convolution
return detail::ConvReferenceDispatcher<
kProvider,
kConvolutionalOperator,
kConvDim,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp
>::dispatch(
host_workspace,
static_cast<ElementA *>(const_cast<void *>(args.A)),
static_cast<ElementB *>(const_cast<void *>(args.B)),
static_cast<ElementC *>(const_cast<void *>(args.C)),
static_cast<ElementC *>(args.D),
alpha,
beta,
stream
);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs Fprop reference operators.
template <
int kConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_conv_fprop(Manifest &manifest) {
manifest.append(new ConvReferenceOperation<
Provider::kReferenceHost,
cutlass::conv::Operator::kFprop,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceDevice,
cutlass::conv::Operator::kFprop,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
}
/// Constructs Dgrad and Wgrad reference operators.
template <
int kConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_conv_backwards(Manifest &manifest) {
manifest.append(new ConvReferenceOperation<
Provider::kReferenceHost,
cutlass::conv::Operator::kDgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceDevice,
cutlass::conv::Operator::kDgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceHost,
cutlass::conv::Operator::kWgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new ConvReferenceOperation<
Provider::kReferenceDevice,
cutlass::conv::Operator::kWgrad,
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>);
}
/// Constructs all six reference operators: Fprop, Dgrad, and Wgrad, each for host and device providers.
template <
int kConvDim,
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_conv_all(Manifest &manifest) {
make_conv_fprop<
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_conv_backwards<
kConvDim,
ElementA_, LayoutA_,
ElementB_, LayoutB_,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
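// Illustrative sketch: populating a Manifest with reference conv operators using the
// helpers above. The element/layout combination shown is an example only; the actual
// registrations are performed by the reference-operation initialization code elsewhere
// in the library.
#if 0
void register_example_conv_references(Manifest &manifest) {
  // 2-D convolution reference: f32 NHWC activations/filters/output, f32 accumulation.
  // Adds six operators: Fprop, Dgrad, and Wgrad, each for host and device providers.
  make_conv_all<
    2,
    float, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float, cutlass::layout::TensorNHWC,
    float
  >(manifest);
}
#endif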
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/conv_reference_operation.h/0 | {
"file_path": "tools/library/src/reference/conv_reference_operation.h",
"repo_id": "tools",
"token_count": 6444
} | 66 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
// Profiler includes
#include "cutlass/profiler/cutlass_profiler.h"
#include "cutlass/profiler/gemm_operation_profiler.h"
#include "cutlass/profiler/rank_k_operation_profiler.h"
#include "cutlass/profiler/rank_2k_operation_profiler.h"
#include "cutlass/profiler/trmm_operation_profiler.h"
#include "cutlass/profiler/symm_operation_profiler.h"
#include "cutlass/profiler/conv2d_operation_profiler.h"
#include "cutlass/profiler/conv3d_operation_profiler.h"
#include "cutlass/profiler/sparse_gemm_operation_profiler.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
CutlassProfiler::CutlassProfiler(
Options const &options
):
options_(options) {
operation_profilers_.emplace_back(new GemmOperationProfiler(options));
operation_profilers_.emplace_back(new SparseGemmOperationProfiler(options));
operation_profilers_.emplace_back(new Conv2dOperationProfiler(options));
operation_profilers_.emplace_back(new Conv3dOperationProfiler(options));
operation_profilers_.emplace_back(new RankKOperationProfiler(options));
operation_profilers_.emplace_back(new Rank2KOperationProfiler(options));
operation_profilers_.emplace_back(new TrmmOperationProfiler(options));
operation_profilers_.emplace_back(new SymmOperationProfiler(options));
}
CutlassProfiler::~CutlassProfiler() {
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Execute the program
int CutlassProfiler::operator()() {
if (options_.cmdline.num_naked_args() > 0) {
std::cerr << "Unknown args: \n";
options_.cmdline.print_naked_args(std::cerr);
std::cerr << "\n\n\n";
print_usage_(std::cout);
return 1;
}
if (options_.about.help) {
if (options_.operation_kind == library::OperationKind::kInvalid) {
print_usage_(std::cout);
}
else {
for (auto & profiler : operation_profilers_) {
if (profiler->kind() == options_.operation_kind) {
profiler->print_usage(std::cout);
profiler->print_examples(std::cout);
return 0;
}
}
}
return 0;
}
else if (options_.about.version) {
options_.about.print_version(std::cout);
std::cout << std::endl;
return 0;
}
else if (options_.about.device_info) {
options_.device.print_device_info(std::cout);
return 0;
}
if (options_.execution_mode == ExecutionMode::kProfile ||
options_.execution_mode == ExecutionMode::kDryRun ||
options_.execution_mode == ExecutionMode::kTrace) {
// Profiles all operations
return profile_();
}
else if (options_.execution_mode == ExecutionMode::kEnumerate) {
// Enumerates all operations
enumerate_();
}
return 0;
}
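// Illustrative sketch of a minimal driver for this class, similar in spirit to the
// profiler's main entry point. The header names and the Options(CommandLine) constructor
// signature are assumptions made for this example.
#if 0
#include "cutlass/util/command_line.h"
#include "cutlass/profiler/options.h"
#include "cutlass/profiler/cutlass_profiler.h"

int main(int argc, char const *argv[]) {
  cutlass::CommandLine cmdline(argc, argv);
  cutlass::profiler::Options options(cmdline);

  cutlass::profiler::CutlassProfiler profiler(options);
  return profiler();   // invokes operator() defined above
}
#endif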
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerates all operations
void CutlassProfiler::enumerate_() {
}
/// Profiles all operations
int CutlassProfiler::profile_() {
int result = 0;
DeviceContext device_context;
// For all profilers
for (auto & profiler : operation_profilers_) {
if (options_.operation_kind == library::OperationKind::kInvalid ||
options_.operation_kind == profiler->kind()) {
result = profiler->profile_all(options_, library::Singleton::get().manifest, device_context);
if (result) {
return result;
}
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints all options
void CutlassProfiler::print_usage_(std::ostream &out) {
options_.print_usage(out);
out << "\nOperations:\n\n";
// For all profilers
for (auto & profiler : operation_profilers_) {
std::string kind_str = library::to_string(profiler->kind());
size_t kAlignment = 40;
size_t columns = 0;
if (kind_str.size() < kAlignment) {
columns = kAlignment - kind_str.size();
}
out << " " << kind_str << std::string(columns, ' ') << profiler->description() << "\n";
}
out << "\n\nFor details about a particular function, specify the function name with --help.\n\nExample:\n\n"
<< " $ cutlass_profiler --operation=Gemm --help\n\n"
<< " $ cutlass_profiler --operation=RankK --help\n\n"
<< " $ cutlass_profiler --operation=Trmm --help\n\n"
<< " $ cutlass_profiler --operation=Symm --help\n\n"
<< " $ cutlass_profiler --operation=Conv3d --help\n\n"
<< " $ cutlass_profiler --operation=Conv2d --help\n\n"
<< " $ cutlass_profiler --operation=SparseGemm --help\n\n"
;
}
/// Prints usage
void CutlassProfiler::print_options_(std::ostream &out) {
options_.print_options(out);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Initializes the CUDA device
void CutlassProfiler::initialize_device_() {
cudaError_t result = cudaSetDevice(options_.device.device);
if (result != cudaSuccess) {
std::cerr << "Failed to set device.";
throw std::runtime_error("Failed to set device");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/cutlass_profiler.cu/0 | {
"file_path": "tools/profiler/src/cutlass_profiler.cu",
"repo_id": "tools",
"token_count": 2240
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/trmm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
TrmmOperationProfiler::TrmmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kTrmm,
{
{ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"},
{ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Triangular Matrix-Multiplication. D = alpha * A * B or alpha * B * A";
}
/// Destructor
TrmmOperationProfiler::~TrmmOperationProfiler() {
}
/// Prints usage statement for the math function
void TrmmOperationProfiler::print_usage(std::ostream &out) const {
out << "TRMM" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void TrmmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Trmm \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status TrmmOperationProfiler::TrmmProblem::parse(
library::TrmmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
if (operation_desc.side_mode == SideMode::kLeft) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->m)}).front();
}
else if (operation_desc.side_mode == SideMode::kRight) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->n)}).front();
}
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->m), int(this->n)}).front();
this->ldd = DeviceAllocation::get_packed_layout(
operation_desc.D.layout, {int(this->m), int(this->n)}).front();
return Status::kSuccess;
}
/// Initializes a performance result
void TrmmOperationProfiler::TrmmProblem::initialize_result(
PerformanceResult &result,
library::TrmmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "D", problem_space,
std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status TrmmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::TrmmDescription const &operation_desc =
static_cast<library::TrmmDescription const &>(operation->description());
if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
trmm_workspace_.configuration.problem_size.m() = int(problem_.m);
trmm_workspace_.configuration.problem_size.n() = int(problem_.n);
trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft)
? int(problem_.m) : int(problem_.n);
trmm_workspace_.configuration.lda = problem_.lda;
trmm_workspace_.configuration.ldb = problem_.ldb;
trmm_workspace_.configuration.ldd = problem_.ldd;
//trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
trmm_workspace_.arguments.A = nullptr;
trmm_workspace_.arguments.B = nullptr;
trmm_workspace_.arguments.D = nullptr;
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments);
}
/// Initializes the performance result
void TrmmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::TrmmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
if (operation_desc.side_mode == SideMode::kLeft) {
// Input bytes read and Output bytes written for the trmm problem
result.bytes =
// Half matrix including the diagonal will have (M*(M+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n +
int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n;
} else if (operation_desc.side_mode == SideMode::kRight) {
// Input bytes read and Output bytes written for the trmm problem
result.bytes =
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n +
int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n;
}
// FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero
result.flops = problem_.m * (problem_.m + 1) * problem_.n;
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
result.flops *= 4;
break;
default: break;
}
}
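// Worked example of the model above (side mode kLeft, m = n = 1024, f16 A/B/D):
// the triangular A operand reads m*(m+1)/2 = 524,800 elements (1,049,600 bytes),
// while B and D each move m*n = 1,048,576 elements (2,097,152 bytes apiece), giving
// result.bytes = 5,243,904. result.flops = m*(m+1)*n = 1,074,790,400: the factor of 2
// for multiply-add cancels the factor of 1/2 from the triangular half of A.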
/// Initializes workspace
Status TrmmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::TrmmDescription const &operation_desc =
static_cast<library::TrmmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
if (operation_desc.side_mode == SideMode::kLeft) {
trmm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.m)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
} else if (operation_desc.side_mode == SideMode::kRight) {
trmm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
}
trmm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
trmm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldd)}
);
trmm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.D.element,
operation_desc.D.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldd)}
);
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration);
trmm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration);
trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&trmm_workspace_.configuration,
trmm_workspace_.host_workspace.data(),
trmm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kTrmm;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool TrmmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing TRMM arguments
trmm_workspace_.arguments.A = trmm_workspace_.A->data();
trmm_workspace_.arguments.B = trmm_workspace_.B->data();
trmm_workspace_.arguments.D = trmm_workspace_.Computed->data();
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&trmm_workspace_.arguments,
trmm_workspace_.host_workspace.data(),
trmm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
  // CUTLASS op ran to completion but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & trmm_desc = static_cast<library::TrmmDescription const &>(operation->description());
if (cublas_satisfies(trmm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool TrmmOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::TrmmDescription const &trmm_desc =
static_cast<library::TrmmDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Trmm()
//
// Initialize structure containing TRMM arguments
trmm_workspace_.arguments.A = trmm_workspace_.A->data();
trmm_workspace_.arguments.B = trmm_workspace_.B->data();
trmm_workspace_.arguments.D = trmm_workspace_.Reference->data();
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasTrmmDispatcher trmm_op(
trmm_desc,
trmm_workspace_.configuration,
trmm_workspace_.arguments
);
if (trmm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = trmm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*trmm_workspace_.Computed,
*trmm_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
trmm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool TrmmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing TRMM arguments
trmm_workspace_.arguments.A = trmm_workspace_.A->data();
trmm_workspace_.arguments.B = trmm_workspace_.B->data();
trmm_workspace_.arguments.D = trmm_workspace_.Computed->data();
trmm_workspace_.arguments.alpha = problem_.alpha.data();
trmm_workspace_.arguments.beta = problem_.beta.data();
trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&trmm_workspace_.arguments,
trmm_workspace_.host_workspace.data(),
trmm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/trmm_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/trmm_operation_profiler.cu",
"repo_id": "tools",
"token_count": 8624
} | 68 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
\brief This header contains a class to parametrize a statistical distribution function.
*/
#include <ostream>
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Distribution type
struct Distribution {
/// Variant types
enum Kind { Invalid, Uniform, Gaussian, Identity, Sequential, AllZeros, AllOnes };
/// Distribution state
union {
/// Uniform distribution
struct {
double min;
double max;
} uniform;
/// Gaussian distribution
struct {
double mean;
double stddev;
double pnz;
double pnzA;
double pnzB;
double pnzC;
} gaussian;
/// Elements are linear combination of row and column index
struct {
double start;
double delta;
} sequential;
};
/// Active variant kind
Kind kind;
/// Random values are cast to integer after scaling by this power of two
int int_scale;
//
// Methods
//
Distribution() : kind(Invalid), int_scale(0) {}
/// Configures distribution as uniform random
Distribution &set_uniform(double _min, double _max, int _int_scale = 0) {
kind = Uniform;
uniform.min = _min;
uniform.max = _max;
int_scale = _int_scale;
return *this;
}
/// Configures distribution as Gaussian distribution
Distribution &set_gaussian(double _mean, double _stddev, int _int_scale = 0, double _pnz = 100.0) {
kind = Gaussian;
gaussian.mean = _mean;
gaussian.stddev = _stddev;
gaussian.pnz = _pnz;
int_scale = _int_scale;
return *this;
}
/// Sets identity
Distribution &set_identity() {
kind = Identity;
return *this;
}
/// Sets sequential
Distribution &set_sequential(double start, double delta, int _int_scale = 0) {
kind = Sequential;
sequential.start = start;
sequential.delta = delta;
int_scale = _int_scale;
return *this;
}
};
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints a Distribution to ostream
inline std::ostream &operator<<(std::ostream &out, cutlass::Distribution const &dist) {
switch (dist.kind) {
case cutlass::Distribution::Uniform:
out << "uniform, min: " << dist.uniform.min << ", max: " << dist.uniform.max;
break;
case cutlass::Distribution::Gaussian:
out << "gaussian, mean: " << dist.gaussian.mean << ", stddev: " << dist.gaussian.stddev
<< ", pnzA: " << dist.gaussian.pnzA << ", pnzB: "
<< dist.gaussian.pnzB << ", pnzC: " << dist.gaussian.pnzC;
break;
case cutlass::Distribution::Identity:
out << "identity";
break;
case cutlass::Distribution::Sequential:
out << "sequential";
break;
default:
out << "unknown";
}
out << ", int_scale: " << dist.int_scale;
return out;
}
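// Illustrative usage sketch: configure distributions and print them with the stream
// operator above. The variable names are hypothetical; tensor-fill helpers elsewhere
// in the CUTLASS utilities consume Distribution objects configured this way.
#if 0
cutlass::Distribution uniform_dist;
uniform_dist.set_uniform(-4, 4);            // uniform in [-4, 4], no integer scaling

cutlass::Distribution gaussian_dist;
gaussian_dist.set_gaussian(0.0, 3.0);       // mean 0, stddev 3, 100% nonzero

std::cout << uniform_dist << "\n" << gaussian_dist << std::endl;
#endif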
////////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/distribution.h/0 | {
"file_path": "tools/util/include/cutlass/util/distribution.h",
"repo_id": "tools",
"token_count": 1485
} | 69 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued GEMM in device code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_ref_planar_complex.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
static int const kGemmPlanarComplexBlockSize = 4;
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<complex<ComputeType>>
>
__global__ void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d,
complex<ComputeType> initial_accum) {
int const kMblock = kGemmPlanarComplexBlockSize;
int const kNblock = kGemmPlanarComplexBlockSize;
using ComplexA = typename TensorRefPlanarComplex<ElementA, LayoutA>::ComplexElement;
using ComplexB = typename TensorRefPlanarComplex<ElementB, LayoutB>::ComplexElement;
using ComplexC = typename TensorRefPlanarComplex<ElementC, LayoutC>::ComplexElement;
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
ConvertOp convert_op;
InnerProductOp inner_product_op;
complex<ComputeType> accum[kMblock][kNblock];
int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock;
int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
accum[i][j] = initial_accum;
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int k_block = 0; k_block < K; ++k_block) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ComplexA a_ik = tensor_a.at(MatrixCoord(row, k_block));
ComplexB b_kj = tensor_b.at(MatrixCoord(k_block, col));
complex<ComputeType> a = complex<ComputeType>{
ComputeType(a_ik.real()),
ComputeType(a_ik.imag())
};
complex<ComputeType> b = complex<ComputeType>{
ComputeType(b_kj.real()),
ComputeType(b_kj.imag())
};
if (transform_a == ComplexTransform::kConjugate) {
a = conj(a);
}
if (transform_b == ComplexTransform::kConjugate) {
b = conj(b);
}
accum[i][j] = inner_product_op(a, b, accum[i][j]);
}
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
complex<ScalarType> acc{
ScalarType(accum[i][j].real()),
ScalarType(accum[i][j].imag())
};
ComplexC c_ij = ComplexC();
if (beta.real() != ScalarType() || beta.imag() != ScalarType()) {
c_ij = tensor_c.at(coord);
}
complex<ScalarType> src{
ScalarType(c_ij.real()),
ScalarType(c_ij.imag())
};
complex<ScalarType> result = alpha * acc + beta * src;
ComplexC d_ij;
d_ij.real() = convert_op(result.real());
d_ij.imag() = convert_op(result.imag());
tensor_d.at(coord) = d_ij;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by
/// TensorRefPlanarComplex objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<complex<ComputeType>>
>
void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d,
complex<ComputeType> initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
int const kMblock = kernel::kGemmPlanarComplexBlockSize;
int const kNblock = kernel::kGemmPlanarComplexBlockSize;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock),
(problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock),
1);
kernel::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ScalarType,
ComputeType,
ConvertOp,
InnerProductOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
transform_a,
tensor_b,
transform_b,
beta,
tensor_c,
tensor_d,
initial_accum
);
}
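// Usage sketch (illustrative only): the pointers, extents, and imaginary-plane strides
// below are assumptions, not part of this header. It shows how passing 'initial_accum'
// lets every template argument be deduced:
//
//   using Layout = cutlass::layout::ColumnMajor;
//   cutlass::gemm::GemmCoord problem(M, N, K);
//   cutlass::TensorRefPlanarComplex<float, Layout> ref_A(ptr_A, Layout(M), M * K);
//   cutlass::TensorRefPlanarComplex<float, Layout> ref_B(ptr_B, Layout(K), K * N);
//   cutlass::TensorRefPlanarComplex<float, Layout> ref_C(ptr_C, Layout(M), M * N);
//   cutlass::TensorRefPlanarComplex<float, Layout> ref_D(ptr_D, Layout(M), M * N);
//   cutlass::reference::device::GemmPlanarComplex(
//       problem,
//       cutlass::complex<float>(1.0f),                  // alpha
//       ref_A, cutlass::ComplexTransform::kNone,
//       ref_B, cutlass::ComplexTransform::kNone,
//       cutlass::complex<float>(0.0f),                  // beta
//       ref_C, ref_D,
//       cutlass::complex<float>(0.0f));                 // initial_accum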
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by
/// TensorRefPlanarComplex objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType
>
void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d) {
GemmPlanarComplex(
problem_size,
alpha,
tensor_a, transform_a,
tensor_b, transform_b,
beta,
tensor_c,
tensor_d,
complex<ScalarType>());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h",
"repo_id": "tools",
"token_count": 3432
} | 70 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ScalarType>,
typename InnerProductOp = multiply_add<ComputeType>
>
void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementD, LayoutC> tensor_d,
ComputeType initial_accum,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
  // Batching is handled by the batch_idx loop below using the caller-provided batch strides.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
  // Blocking is necessary to speed up the reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) {
// Compute matrix product using blocks
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b = tensor_b.at(MatrixCoord(k_block, col));
ComputeType a_ik = ComputeType(a);
ComputeType b_kj = ComputeType(b);
if (transform_a == ComplexTransform::kConjugate) {
a_ik = conj(a_ik);
}
if (transform_b == ComplexTransform::kConjugate) {
b_kj = conj(b_kj);
}
accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
} // for (col_block)
} // for (row_block)
tensor_a.add_pointer_offset(batch_stride_A);
tensor_b.add_pointer_offset(batch_stride_B);
tensor_c.add_pointer_offset(batch_stride_C);
tensor_d.add_pointer_offset(batch_stride_D);
} // for (batch_idx)
}
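// Usage sketch (illustrative only): tensor_a/b/c/d are assumed to be
// cutlass::HostTensor<cutlass::complex<float>, cutlass::layout::ColumnMajor> objects
// allocated elsewhere. The trailing complex<float>(0) fixes ComputeType without naming
// every template argument; batched problems additionally pass batch_count and strides:
//
//   cutlass::reference::host::GemmComplex(
//       problem,                                        // gemm::GemmCoord(M, N, K)
//       cutlass::complex<float>(1.0f),                  // alpha
//       tensor_a.host_ref(), cutlass::ComplexTransform::kNone,
//       tensor_b.host_ref(), cutlass::ComplexTransform::kConjugate,
//       cutlass::complex<float>(0.0f),                  // beta
//       tensor_c.host_ref(),
//       tensor_d.host_ref(),
//       cutlass::complex<float>(0.0f));                 // initial_accum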
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ElementD = ElementC
>
void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementD, LayoutC> tensor_d) {
GemmComplex(problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/gemm_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/gemm_complex.h",
"repo_id": "tools",
"token_count": 2608
} | 71 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/reference/detail/linear_to_coordinate.h"
#include "cutlass/core_io.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Transform-reduce operation over the elements of a tensor (host-side reference implementation).
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view,
ComputeType identity,
ReduceOp reduce,
TransformOp transform
) {
for (int64_t idx = 0; idx < int64_t(view.size()); ++idx) {
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view.extent());
if (view.contains(coord)) {
Element x = view.at(coord);
identity = reduce(identity, transform(x));
}
}
return identity;
}
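// Usage sketch (illustrative only): 'view' is assumed to be a host
// cutlass::TensorView<float, cutlass::layout::RowMajor> filled elsewhere. A custom
// transform computes the maximum absolute value over the tensor:
//
//   cutlass::maximum<float> reduce_op;
//   auto abs_op = [](float x) { return std::fabs(x); };
//   float max_abs = cutlass::reference::host::TensorTransformReduce(
//       view, 0.0f, reduce_op, abs_op);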
/// Transform-reduce operation over the elements of two tensors (host-side reference implementation).
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity,
ReduceOp reduce,
TransformOp transform) {
if (view_A.extent() != view_B.extent()) {
throw std::runtime_error("Tensor extents must match.");
}
for (int64_t idx = 0; idx < int64_t(view_A.size()); ++idx) {
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view_A.extent());
if (view_A.contains(coord)) {
Element a = view_A.at(coord);
Element b = view_B.at(coord);
identity = reduce(identity, transform(a, b));
}
}
return identity;
}
/// Helper to compute the sum of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSum(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
NumericConverter<ComputeType, Element> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the sum of the squares of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSumSq(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared<Element, ComputeType> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the norm of the elements of a tensor.
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNorm(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSq(view, identity));
}
/// Helper to compute the sum of the squares of the differences of two tensors
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorSumSqDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared_difference<Element, ComputeType> transform;
return TensorTransformReduce(
view_A, view_B, identity, reduce, transform);
}
/// Helper to compute the norm of the element-wise difference of two tensors in memory
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNormDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSqDiff(view_A, view_B, identity));
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/tensor_reduce.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_reduce.h",
"repo_id": "tools",
"token_count": 1810
} | 72 |
cff-version: 1.2.0
title: CUTLASS
message: >-
If you use this software, please cite using the
following metadata.
type: software
authors:
- given-names: Vijay
family-names: Thakkar
email: vithakkar@nvidia.com
affiliation: NVIDIA
- given-names: Pradeep
family-names: Ramani
email: prramani@nvidia.com
affiliation: NVIDIA
- given-names: Cris
family-names: Cecka
email: ccecka@nvidia.com
affiliation: NVIDIA
- given-names: Aniket
family-names: Shivam
email: ashivam@nvidia.com
affiliation: NVIDIA
- given-names: Honghao
family-names: Lu
email: honghaol@nvidia.com
affiliation: NVIDIA
- given-names: Ethan
family-names: Yan
email: etyan@nvidia.com
affiliation: NVIDIA
- given-names: Jack
family-names: Kosaian
email: jkosaian@nvidia.com
affiliation: NVIDIA
- given-names: Mark
family-names: Hoemmen
email: mhoemmen@nvidia.com
affiliation: NVIDIA
- given-names: Haicheng
family-names: Wu
email: haichengw@nvidia.com
affiliation: NVIDIA
- given-names: Andrew
family-names: Kerr
email: akerr@nvidia.com
affiliation: NVIDIA
- given-names: Matt
family-names: Nicely
email: mnicely@nvidia.com
affiliation: NVIDIA
- given-names: Duane
family-names: Merrill
email: dumerrill@nvidia.com
affiliation: NVIDIA
- given-names: Dustyn
family-names: Blasig
email: dblasig@nvidia.com
affiliation: NVIDIA
- given-names: Fengqi
family-names: Qiao
email: fqiao@nvidia.com
affiliation: NVIDIA
- given-names: Piotr
family-names: Majcher
email: pmajcher@nvidia.com
affiliation: NVIDIA
- given-names: Paul
family-names: Springer
email: pspringer@nvidia.com
affiliation: NVIDIA
- given-names: Markus
family-names: Hohnerbach
affiliation: NVIDIA
email: mhohnerbach@nvidia.com
- given-names: Jin
family-names: Wang
email: jinw@nvidia.com
affiliation: NVIDIA
- given-names: Manish
family-names: Gupta
affiliation: Google
email: manigupta@google.com
repository-code: 'https://github.com/NVIDIA/cutlass'
abstract: >-
CUTLASS is a collection of CUDA C++ template
abstractions for implementing high-performance
matrix-multiplication (GEMM) and related
computations at all levels and scales within CUDA.
It incorporates strategies for hierarchical
decomposition and data movement similar to those
used to implement cuBLAS and cuDNN. CUTLASS
decomposes these "moving parts" into reusable,
modular software components abstracted by C++
template classes. These thread-wide, warp-wide,
block-wide, and device-wide primitives can be
specialized and tuned via custom tiling sizes, data
types, and other algorithmic policy. The resulting
flexibility simplifies their use as building blocks
within custom kernels and applications.
keywords:
- 'cutlass, tensor cores, cuda, cute, nvidia, gpu, linear algebra, matrix computations'
license: BSD-3-Clause
license-url: https://github.com/NVIDIA/cutlass/blob/v3.0.0/LICENSE.txt
version: '3.0.0'
date-released: '2023-01-23'
identifiers:
- type: url
value: "https://github.com/NVIDIA/cutlass/tree/v3.0.0"
description: The GitHub release URL of tag 3.0.0
| CITATION.cff/0 | {
"file_path": "CITATION.cff",
"repo_id": "CITATION.cff",
"token_count": 1162
} | 0 |
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
message(STATUS "Configuring cublas ...")
if((DEFINED CUTLASS_ENABLE_CUBLAS AND NOT CUTLASS_ENABLE_CUBLAS) OR
(DEFINED CUBLAS_ENABLED AND NOT CUBLAS_ENABLED))
  # cuBLAS support was explicitly disabled, so treat the library as not found.
set(CUBLAS_FOUND OFF)
message(STATUS "cuBLAS Disabled.")
elseif(NOT TARGET cublas)
find_path(
_CUBLAS_INCLUDE_DIR
NAMES cublas_v2.h
HINTS
${CUBLAS_INCLUDE_PATH}
ENV CUBLAS_INCLUDE_PATH
${CUBLAS_PATH}
ENV CUBLAS_PATH
${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES
include
)
find_library(
_CUBLAS_LIBRARY
NAMES cublas
HINTS
${CUBLAS_LIBRARY_PATH}
ENV CUBLAS_LIBRARY_PATH
${_CUBLAS_INCLUDE_DIR}/..
${CUBLAS_PATH}
ENV CUBLAS_PATH
${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES
lib64
lib/x64
lib
)
if(_CUBLAS_INCLUDE_DIR AND _CUBLAS_LIBRARY)
message(STATUS "cuBLAS: ${_CUBLAS_LIBRARY}")
message(STATUS "cuBLAS: ${_CUBLAS_INCLUDE_DIR}")
set(CUBLAS_FOUND ON CACHE INTERNAL "cublas Library Found")
set(CUBLAS_LIBRARY ${_CUBLAS_LIBRARY})
set(CUBLAS_INCLUDE_DIR ${_CUBLAS_INCLUDE_DIR})
else()
message(STATUS "cublas not found.")
set(CUBLAS_FOUND OFF CACHE INTERNAL "cublas Library Found")
endif()
endif()
set(CUTLASS_ENABLE_CUBLAS ${CUBLAS_FOUND} CACHE BOOL "Enable CUTLASS to build with cuBLAS library.")
if(CUTLASS_ENABLE_CUBLAS AND NOT CUBLAS_FOUND)
message(FATAL_ERROR "CUTLASS_ENABLE_CUBLAS enabled but cuBLAS library could not be found.")
endif()
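# Example configuration (illustrative paths; adjust to the local installation):
#
#   cmake -S . -B build -DCUTLASS_ENABLE_CUBLAS=ON -DCUBLAS_PATH=/usr/local/cuda
#
# CUBLAS_INCLUDE_PATH and CUBLAS_LIBRARY_PATH (as cache or environment variables) may
# also be set to locate a cuBLAS installed outside the CUDA toolkit.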
if(CUTLASS_ENABLE_CUBLAS AND NOT TARGET cublas)
if(WIN32)
add_library(cublas STATIC IMPORTED GLOBAL)
else()
add_library(cublas SHARED IMPORTED GLOBAL)
endif()
add_library(nvidia::cublas ALIAS cublas)
set_property(
TARGET cublas
PROPERTY IMPORTED_LOCATION
${CUBLAS_LIBRARY})
target_include_directories(
cublas
INTERFACE
$<INSTALL_INTERFACE:include>
$<BUILD_INTERFACE:${CUBLAS_INCLUDE_DIR}>)
find_library(
_CUBLASLT_LIBRARY
NAMES cublasLt
HINTS
${CUBLAS_LIBRARY_PATH}
ENV CUBLAS_LIBRARY_PATH
${_CUBLAS_INCLUDE_DIR}/..
${CUBLAS_PATH}
ENV CUBLAS_PATH
${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES
lib64
lib/x64
lib
)
if(_CUBLASLT_LIBRARY AND NOT TARGET cublasLt)
if(WIN32)
add_library(cublasLt STATIC IMPORTED GLOBAL)
else()
add_library(cublasLt SHARED IMPORTED GLOBAL)
endif()
set_property(
TARGET cublasLt
PROPERTY IMPORTED_LOCATION
${_CUBLASLT_LIBRARY})
add_library(nvidia::cublasLt ALIAS cublasLt)
target_link_libraries(cublas INTERFACE cublasLt)
endif()
endif()
message(STATUS "Configuring cuBLAS ... done.")
| cuBLAS.cmake/0 | {
"file_path": "cuBLAS.cmake",
"repo_id": "cuBLAS.cmake",
"token_count": 1789
} | 1 |
var searchData=
[
['complextransform',['ComplexTransform',['../namespacecutlass.html#a59f08b1b99c4d52257b962d35ba55cde',1,'cutlass::ComplexTransform()'],['../namespacecutlass_1_1library.html#aa2b27589531eec608a86cf43a36c4175',1,'cutlass::library::ComplexTransform()']]]
];
| docs/search/enums_0.js/0 | {
"file_path": "docs/search/enums_0.js",
"repo_id": "docs",
"token_count": 114
} | 2 |
var searchData=
[
['scalarpointermode',['ScalarPointerMode',['../namespacecutlass_1_1library.html#af4d69c13cb62d2ef63e1e5491a32caba',1,'cutlass::library']]],
['splitkmode',['SplitKMode',['../namespacecutlass_1_1library.html#a5ccf134b261aafdde24f4185cf1ddda6',1,'cutlass::library']]],
['status',['Status',['../namespacecutlass.html#ac5a88c5840a28a9e0206b9cc7812a18d',1,'cutlass']]]
];
| docs/search/enums_8.js/0 | {
"file_path": "docs/search/enums_8.js",
"repo_id": "docs",
"token_count": 175
} | 3 |
var searchData=
[
['aligned_5fbuffer_2eh',['aligned_buffer.h',['../aligned__buffer_8h.html',1,'']]],
['arch_2eh',['arch.h',['../arch_8h.html',1,'']]],
['array_2eh',['array.h',['../array_8h.html',1,'']]],
['array_5fsubbyte_2eh',['array_subbyte.h',['../array__subbyte_8h.html',1,'']]],
['mma_2eh',['mma.h',['../arch_2mma_8h.html',1,'']]],
['mma_5fsm50_2eh',['mma_sm50.h',['../arch_2mma__sm50_8h.html',1,'']]],
['mma_5fsm60_2eh',['mma_sm60.h',['../arch_2mma__sm60_8h.html',1,'']]],
['mma_5fsm61_2eh',['mma_sm61.h',['../arch_2mma__sm61_8h.html',1,'']]]
];
| docs/search/files_0.js/0 | {
"file_path": "docs/search/files_0.js",
"repo_id": "docs",
"token_count": 281
} | 4 |
var searchData=
[
['epilogue_2eh',['epilogue.h',['../epilogue_8h.html',1,'']]],
['epilogue_5fbase_2eh',['epilogue_base.h',['../epilogue__base_8h.html',1,'']]],
['epilogue_5fworkspace_2eh',['epilogue_workspace.h',['../epilogue__workspace_8h.html',1,'']]],
['exceptions_2eh',['exceptions.h',['../exceptions_8h.html',1,'']]],
['predicated_5ftile_5fiterator_2eh',['predicated_tile_iterator.h',['../epilogue_2threadblock_2predicated__tile__iterator_8h.html',1,'']]]
];
| docs/search/files_4.js/0 | {
"file_path": "docs/search/files_4.js",
"repo_id": "docs",
"token_count": 207
} | 5 |
var searchData=
[
['numeric_5fconversion_2eh',['numeric_conversion.h',['../numeric__conversion_8h.html',1,'']]],
['numeric_5ftypes_2eh',['numeric_types.h',['../numeric__types_8h.html',1,'']]]
];
| docs/search/files_c.js/0 | {
"file_path": "docs/search/files_c.js",
"repo_id": "docs",
"token_count": 86
} | 6 |
.tabs, .tabs2, .tabs3 {
background-image: url('tab_b.png');
width: 100%;
z-index: 101;
font-size: 13px;
font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
}
.tabs2 {
font-size: 10px;
}
.tabs3 {
font-size: 9px;
}
.tablist {
margin: 0;
padding: 0;
display: table;
}
.tablist li {
float: left;
display: table-cell;
background-image: url('tab_b.png');
line-height: 36px;
list-style: none;
}
.tablist a {
display: block;
padding: 0 20px;
font-weight: bold;
background-image:url('tab_s.png');
background-repeat:no-repeat;
background-position:right;
color: #3E5035;
text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
text-decoration: none;
outline: none;
}
.tabs3 .tablist a {
padding: 0 10px;
}
.tablist a:hover {
background-image: url('tab_h.png');
background-repeat:repeat-x;
color: #fff;
text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
text-decoration: none;
}
.tablist li.current a {
background-image: url('tab_a.png');
background-repeat:repeat-x;
color: #fff;
text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
}
| docs/tabs.css/0 | {
"file_path": "docs/tabs.css",
"repo_id": "docs",
"token_count": 544
} | 7 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Containers for running grouped back-to-back GEMMs
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_relu.h"
#include "reference/device/tensor_scale_bias.h"
#include "helper.h"
#define CHECK_GT(val1, val2) \
if((val1) <= (val2)) \
std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_GT failed\n";
#define CHECK_TRUE(val) \
if(!(val)) \
std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_TRUE failed\n";
////////////////////////////////////////////////////////////////////////////////
template <typename B2bGemm_>
struct B2bFusedGroupedGemmRun
{
using B2bGemm = B2bGemm_;
using ElementAccumulator = typename B2bGemm::ElementAccumulator;
using ElementCompute = typename B2bGemm::BaseKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_Scale;
cutlass::Distribution::Kind init_Bias;
uint64_t seed;
//
// Methods
//
B2bFusedGroupedGemmRun(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_),
init_Scale(init_Scale_), init_Bias(init_Bias_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(
view, seed, 1, -1, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view, Element(0));
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
std::cerr << "Not implemented\n";
return false;
}
return true;
}
/// Executes one test
bool run(
std::vector<cutlass::gemm::GemmCoord> problem_sizes_0,
std::vector<cutlass::gemm::GemmCoord> problem_sizes_1,
ElementCompute alpha0 = ElementCompute(1),
ElementCompute beta0 = ElementCompute(0),
ElementCompute alpha1 = ElementCompute(1),
ElementCompute beta1 = ElementCompute(0),
bool relu = true,
int warm_ups = 1,
int runs = 100) {
using HostTensorA = cutlass::HostTensor<typename B2bGemm::ElementA, typename B2bGemm::LayoutA>;
using HostTensorB = cutlass::HostTensor<typename B2bGemm::ElementB, typename B2bGemm::LayoutB>;
using HostTensorC = cutlass::HostTensor<typename B2bGemm::ElementC, typename B2bGemm::LayoutC>;
using HostTensorScale = cutlass::HostTensor<ElementCompute, typename B2bGemm::LayoutC>;
using HostTensorZ = cutlass::HostTensor<ElementAccumulator, typename B2bGemm::LayoutC>;
using HostTensorBias = cutlass::HostTensor<ElementCompute, typename B2bGemm::LayoutC>;
int problem_count = (int)problem_sizes_0.size();
std::vector<HostTensorA> host_tensor_A0(problem_count);
std::vector<HostTensorB> host_tensor_B0(problem_count);
std::vector<HostTensorC> host_tensor_C0(problem_count);
std::vector<HostTensorScale> host_tensor_Scale0(problem_count);
std::vector<HostTensorScale> host_tensor_Bias0(problem_count);
std::vector<HostTensorB> host_tensor_B1(problem_count);
std::vector<HostTensorC> host_tensor_C1(problem_count);
std::vector<HostTensorBias> host_tensor_Bias1(problem_count);
std::vector<HostTensorC> host_tensor_D1(problem_count);
std::vector<HostTensorZ> host_tensor_Z(problem_count);
std::vector<HostTensorC> host_tensor_ref_D0(problem_count);
std::vector<HostTensorC> host_tensor_ref_D1(problem_count);
std::vector<typename HostTensorA::TensorRef> ref_A0(problem_count);
std::vector<typename HostTensorB::TensorRef> ref_B0(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_C0(problem_count);
std::vector<typename HostTensorScale::TensorRef> ref_Scale0(problem_count);
std::vector<typename HostTensorScale::TensorRef> ref_Bias0(problem_count);
std::vector<typename HostTensorB::TensorRef> ref_B1(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_C1(problem_count);
std::vector<typename HostTensorBias::TensorRef> ref_Bias1(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_D1(problem_count);
std::vector<typename HostTensorZ::TensorRef> ref_Z(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_ref_D0(problem_count);
std::vector<typename HostTensorC::TensorRef> ref_ref_D1(problem_count);
for (int i = 0; i < problem_count; ++i) {
//
// Allocate the GEMM workspace
//
auto problem_size_0 = problem_sizes_0[i];
auto problem_size_1 = problem_sizes_1[i];
host_tensor_A0.at(i) = HostTensorA(problem_size_0.mk());
host_tensor_B0.at(i) = HostTensorB(problem_size_0.kn());
host_tensor_C0.at(i) = HostTensorC(problem_size_0.mn());
if (alpha0 == ElementCompute(0)) //per-channel scale
host_tensor_Scale0.at(i) = HostTensorScale(typename HostTensorZ::Layout::TensorCoord{1, problem_size_0.n()});
host_tensor_Bias0.at(i) = HostTensorScale(typename HostTensorBias::Layout::TensorCoord{1, problem_size_0.n()});
host_tensor_Z.at(i) = HostTensorZ(problem_size_0.mn());
host_tensor_ref_D0.at(i) = HostTensorC(problem_size_0.mn());
host_tensor_B1.at(i) = HostTensorB(problem_size_1.kn());
host_tensor_C1.at(i) = HostTensorC(problem_size_1.mn());
host_tensor_Bias1.at(i) = HostTensorScale(typename HostTensorBias::Layout::TensorCoord{1, problem_size_1.n()});
host_tensor_D1.at(i) = HostTensorC(problem_size_1.mn());
host_tensor_ref_D1.at(i) = HostTensorC(problem_size_1.mn());
CHECK_TRUE(initialize_tensor(host_tensor_A0.at(i).host_view(), init_A, seed + 2019));
CHECK_TRUE(initialize_tensor(host_tensor_B0.at(i).host_view(), init_B, seed + 2018));
CHECK_TRUE(initialize_tensor(host_tensor_C0.at(i).host_view(), init_C, seed + 2017));
if (alpha0 == ElementCompute(0)) //per-channel scale
CHECK_TRUE(initialize_tensor(host_tensor_Scale0.at(i).host_view(), init_Scale, seed + 2014));
CHECK_TRUE(initialize_tensor(host_tensor_Bias0.at(i).host_view(), init_Bias, seed + 2013));
CHECK_TRUE(initialize_tensor(host_tensor_B1.at(i).host_view(), init_B, seed + 2016));
CHECK_TRUE(initialize_tensor(host_tensor_C1.at(i).host_view(), init_C, seed + 2015));
CHECK_TRUE(initialize_tensor(host_tensor_Bias1.at(i).host_view(), init_Bias, seed + 2012));
cutlass::reference::host::TensorFill(
host_tensor_D1.at(i).host_view());
cutlass::reference::host::TensorFill(
host_tensor_ref_D0.at(i).host_view());
cutlass::reference::host::TensorFill(
host_tensor_ref_D1.at(i).host_view());
host_tensor_A0.at(i).sync_device();
host_tensor_B0.at(i).sync_device();
host_tensor_C0.at(i).sync_device();
if (alpha0 == ElementCompute(0)) //per-channel scale
host_tensor_Scale0.at(i).sync_device();
host_tensor_Bias0.at(i).sync_device();
host_tensor_B1.at(i).sync_device();
host_tensor_C1.at(i).sync_device();
host_tensor_Bias1.at(i).sync_device();
host_tensor_D1.at(i).sync_device();
host_tensor_ref_D0.at(i).sync_device();
host_tensor_ref_D1.at(i).sync_device();
ref_A0.at(i) = (host_tensor_A0.at(i).device_ref());
      ref_B0.at(i) = (host_tensor_B0.at(i).device_ref());
ref_C0.at(i) = (host_tensor_C0.at(i).device_ref());
if (alpha0 == ElementCompute(0)) //per-channel scale
ref_Scale0.at(i) = (host_tensor_Scale0.at(i).device_ref());
ref_Bias0.at(i) = (host_tensor_Bias0.at(i).device_ref());
ref_B1.at(i) = (host_tensor_B1.at(i).device_ref());
ref_C1.at(i) = {host_tensor_Bias1.at(i).device_data(), typename B2bGemm::LayoutC::Stride(0)};
ref_Bias1.at(i) = (host_tensor_Bias1.at(i).device_ref());
ref_D1.at(i) = (host_tensor_D1.at(i).device_ref());
ref_Z.at(i) = (host_tensor_Z.at(i).device_ref());
ref_ref_D0.at(i) = (host_tensor_ref_D0.at(i).device_ref());
ref_ref_D1.at(i) = (host_tensor_ref_D1.at(i).device_ref());
}
//
// Initialize the GEMM operator
//
cutlass::DeviceAllocation<typename HostTensorA::TensorRef> device_ref_A0(problem_count);
device_ref_A0.copy_from_host(ref_A0.data());
cutlass::DeviceAllocation<typename HostTensorB::TensorRef> device_ref_B0(problem_count);
device_ref_B0.copy_from_host(ref_B0.data());
cutlass::DeviceAllocation<typename HostTensorC::TensorRef> device_ref_C0(problem_count);
device_ref_C0.copy_from_host(ref_C0.data());
cutlass::DeviceAllocation<typename HostTensorScale::TensorRef> device_ref_Scale0(problem_count);
device_ref_Scale0.copy_from_host(ref_Scale0.data());
cutlass::DeviceAllocation<typename HostTensorScale::TensorRef> device_ref_Bias0(problem_count);
device_ref_Bias0.copy_from_host(ref_Bias0.data());
cutlass::DeviceAllocation<typename HostTensorB::TensorRef> device_ref_B1(problem_count);
device_ref_B1.copy_from_host(ref_B1.data());
cutlass::DeviceAllocation<typename HostTensorC::TensorRef> device_ref_C1(problem_count);
device_ref_C1.copy_from_host(ref_C1.data());
cutlass::DeviceAllocation<typename HostTensorBias::TensorRef> device_ref_Bias1(problem_count);
device_ref_Bias1.copy_from_host(ref_Bias1.data());
cutlass::DeviceAllocation<typename HostTensorC::TensorRef> device_ref_D1(problem_count);
device_ref_D1.copy_from_host(ref_D1.data());
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes_0(problem_count);
device_problem_sizes_0.copy_from_host(problem_sizes_0.data());
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes_1(problem_count);
device_problem_sizes_1.copy_from_host(problem_sizes_1.data());
B2bGemm b2b_gemm_op;
int threadblock_count = B2bGemm::sufficient(problem_sizes_1.data(), problem_count);
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped GEMM kernel." << std::endl;
return false;
}
typename B2bGemm::Arguments arguments{
problem_count,
device_problem_sizes_0.get(),
device_problem_sizes_1.get(),
device_ref_A0.get(),
device_ref_B0.get(),
device_ref_C0.get(),
device_ref_Scale0.get(),
device_ref_Bias0.get(),
device_ref_B1.get(),
device_ref_C1.get(),
device_ref_D1.get(),
{alpha0, beta0},
{alpha1, beta1},
threadblock_count
};
cutlass::Status status = b2b_gemm_op.can_implement(arguments);
if(status != cutlass::Status::kSuccess) {
std::cout << "Problem sizes not supported.\n"
<< "Requirments:\n"
<< " problem_size_0.M = problem_size_1.M\n"
<< " problem_size_0.N = problem_size_1.K\n"
<< " ThreadblockShape0::kN = problem_size_0.N\n"
<< " ThreadblockShape1::kN = problem_size_1.N" << std::endl;
}
status = b2b_gemm_op.initialize(arguments);
CUTLASS_CHECK(status);
for(int i = 0; i < warm_ups; i++) {
status = b2b_gemm_op();
CUTLASS_CHECK(status);
}
//
// Run the GEMM
//
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
status = b2b_gemm_op();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop);
cudaDeviceSynchronize();
float gemmTime;
cudaEventElapsedTime(&gemmTime, start, stop);
std::cout << "Fusion time " << gemmTime / (float)runs << " ms\n";
for (int i = 0; i < problem_count; ++i) {
      host_tensor_D1.at(i).sync_host();
//
// Verify
//
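      // The reference check recomputes the fusion in stages: GEMM0 accumulates into Z,
      // a scale/bias pass converts Z to D0 (alpha0 == 0 selects the per-channel scale
      // tensor), optional ReLU is applied, then GEMM1 consumes D0 with Bias1 broadcast
      // through a zero-stride C operand.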
cutlass::reference::device::Gemm<
typename B2bGemm::ElementA, typename B2bGemm::LayoutA,
typename B2bGemm::ElementB, typename B2bGemm::LayoutB,
ElementAccumulator, typename B2bGemm::LayoutC,
ElementAccumulator, ElementAccumulator>
reference_gemm_0;
cutlass::reference::device::Gemm<
typename B2bGemm::ElementA, typename B2bGemm::LayoutA,
typename B2bGemm::ElementB, typename B2bGemm::LayoutB,
typename B2bGemm::ElementC, typename B2bGemm::LayoutC, ElementCompute,
ElementAccumulator>
reference_gemm_1;
auto problem_size_0 = problem_sizes_0[i];
auto problem_size_1 = problem_sizes_1[i];
reference_gemm_0(
problem_size_0,
ElementAccumulator(1), //intermediate alpha=1
ref_A0.at(i),
ref_B0.at(i),
ElementAccumulator(0), //beta = 0
ref_Z.at(i),
ref_Z.at(i),
ElementAccumulator(0)
);
cutlass::reference::device::TensorScaleBiasGemm<
ElementAccumulator, typename B2bGemm::ElementC, typename B2bGemm::LayoutC,
ElementCompute, typename B2bGemm::LayoutC
> (
problem_size_0,
ref_Z.at(i),
ref_ref_D0.at(i),
alpha0,
ref_Scale0.at(i),
ref_Bias0.at(i)
);
if(relu) {
cutlass::reference::device::TensorReLu(host_tensor_ref_D0.at(i).device_view());
}
reference_gemm_1(
problem_size_1,
alpha1,
ref_ref_D0.at(i),
ref_B1.at(i),
beta1,
{host_tensor_Bias1.at(i).device_data(), typename B2bGemm::LayoutC::Stride(0)},
ref_ref_D1.at(i)
);
if(relu) {
cutlass::reference::device::TensorReLu(host_tensor_ref_D1.at(i).device_view());
}
cudaDeviceSynchronize();
host_tensor_ref_D0.at(i).sync_host();
host_tensor_ref_D1.at(i).sync_host();
CHECK_GT(cutlass::reference::host::TensorNorm(host_tensor_ref_D0.at(i).host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(host_tensor_D1.at(i).host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(host_tensor_ref_D1.at(i).host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
host_tensor_ref_D1.at(i).host_view(),
host_tensor_D1.at(i).host_view());
CHECK_TRUE(passed);
if (!passed)
{
std::stringstream fname;
fname << "error_B2bGemm_device_fused.txt";
std::cerr << "Check failed for GEMM " << i << " in the group." << std::endl;
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream file(fname.str());
file
<< "GEMM " << i << " in group\n"
<< "A0 =\n" << host_tensor_A0.at(i).host_view()
<< "\nB0 =\n" << host_tensor_B0.at(i).host_view()
<< "\nC0 =\n" << host_tensor_C0.at(i).host_view()
<< "\nScale0:\n" << host_tensor_Scale0.at(i).host_view() << "\n"
<< "\nBias0:\n" << host_tensor_Bias0.at(i).host_view() << "\n"
<< "\nB1 =\n" << host_tensor_B1.at(i).host_view()
<< "\nC1 =\n" << host_tensor_C1.at(i).host_view()
<< "\nBias1:\n" << host_tensor_Bias1.at(i).host_view() << "\n"
<< "\n\nReference =\n" << host_tensor_ref_D1.at(i).host_view()
<< "\nComputed =\n" << host_tensor_D1.at(i).host_view();
return false;
}
}
return true;
}
};
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/b2b_grouped_gemm_run.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/b2b_grouped_gemm_run.h",
"repo_id": "examples",
"token_count": 8050
} | 8 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief High-level interface for running a grouped version of a CUTLASS kernel
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// High-level interface for running a grouped version of a CUTLASS kernel
template <
typename BaseKernel_ ///! Kernel-scoped matrix multiply-accumulate
>
struct GroupedKernel {
public:
using BaseKernel = BaseKernel_;
using Epilogue = typename BaseKernel::Epilogue;
/// Types that need to be exported to work properly with device::BaseGrouped
using ElementA = typename BaseKernel::ElementA;
using LayoutA = typename BaseKernel::LayoutA;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
static ComplexTransform const kTransformA = BaseKernel::kTransformA;
static int const kAlignmentA = BaseKernel::kAlignmentA;
using ElementB = typename BaseKernel::ElementB;
using LayoutB = typename BaseKernel::LayoutB;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
static ComplexTransform const kTransformB = BaseKernel::kTransformB;
static int const kAlignmentB = BaseKernel::kAlignmentB;
using ElementC = typename BaseKernel::ElementC;
using LayoutC = typename BaseKernel::LayoutC;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
static int const kAlignmentC = BaseKernel::kAlignmentC;
using ElementAccumulator = typename BaseKernel::Mma::Policy::Operator::ElementC;
using EpilogueOutputOp = typename BaseKernel::EpilogueOutputOp;
using ThreadblockSwizzle = typename BaseKernel::ThreadblockSwizzle;
using Operator = typename BaseKernel::Operator;
using WarpMmaOperator = typename BaseKernel::Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename WarpMmaOperator::MathOperator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename BaseKernel::Mma::Shape;
using WarpShape = typename BaseKernel::WarpShape;
using InstructionShape = typename BaseKernel::InstructionShape;
static int const kStages = BaseKernel::Mma::kStages;
using Mma = typename BaseKernel::Mma;
using Arguments = typename BaseKernel::GroupedArguments;
using Params = typename BaseKernel::GroupedParams;
using ProblemVisitor = typename ThreadblockSwizzle::ProblemVisitor;
static int const kThreadCount = BaseKernel::kThreadCount;
/// Shared memory storage structure
struct SharedStorage {
typename BaseKernel::SharedStorage kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GroupedKernel() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes a kernel-level GEMM in a loop
CUTLASS_DEVICE
void operator()(Params ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle swizzle(params.problem_visitor, shared_storage.problem_visitor, blockIdx.x);
if (ProblemVisitor::kTransposed) {
params.transpose();
}
BaseKernel mma;
// Outer 'persistent' loop to iterate over tiles
while (swizzle.problem_visitor.next_tile()) {
typename BaseKernel::Params mma_params = params.to_single_params(swizzle.problem_visitor);
mma.run_with_swizzle(mma_params, shared_storage.kernel, swizzle);
// Next tile
swizzle.problem_visitor.advance(gridDim.x);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/grouped.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/grouped.h",
"repo_id": "examples",
"token_count": 1839
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::Quaternion<float> alpha;
cutlass::Quaternion<float> beta;
bool reference_check;
int iterations;
  Options():
    help(false),
    problem_size({1024, 1024, 1024}),
    batch_count(1),
    alpha(1),
    beta(),
    reference_check(true),
    iterations(20) { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch", batch_count);
cmd.get_cmd_line_argument("alpha", alpha.w());
cmd.get_cmd_line_argument("alpha_i", alpha.x());
cmd.get_cmd_line_argument("alpha_j", alpha.y());
cmd.get_cmd_line_argument("alpha_k", alpha.z());
cmd.get_cmd_line_argument("beta", beta.w());
cmd.get_cmd_line_argument("beta_i", beta.x());
cmd.get_cmd_line_argument("beta_j", beta.y());
cmd.get_cmd_line_argument("beta_k", beta.z());
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "21_quaternion_gemm example\n\n"
<< " This example uses the CUTLASS Library to execute Quaternion GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --batch=<int> Number of GEMM operations executed in one batch\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --alpha_i=<f32> Epilogue scalar alpha_i (imaginary part)\n"
<< " --alpha_j=<f32> Epilogue scalar alpha_j (imaginary part)\n"
<< " --alpha_k=<f32> Epilogue scalar alpha_k (imaginary part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n\n"
<< " --beta_i=<f32> Epilogue scalar beta_i (imaginary part)\n\n"
<< " --beta_j=<f32> Epilogue scalar beta_j (imaginary part)\n\n"
<< " --beta_k=<f32> Epilogue scalar beta_k (imaginary part)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/21_quaternion_gemm/21_quaternion_gemm --batch=7 --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --alpha_i=-2 --beta=0.707 --beta_i=-.707\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
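    // A quaternion multiply-accumulate expands to 16 real-valued multiply-adds
    // (the 4x4 component products of the Hamilton product), hence the factor of 16 below.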
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count * 16;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using precision = float;
using Element = cutlass::Quaternion<float>;
using ElementComputeEpilogue = Element; // <- data type of epilogue operations
using ElementAccumulator = Element; // <- data type of accumulator
using ElementInputA = Element; // <- data type of elements in input matrix A
using ElementInputB = Element; // <- data type of elements in input matrix B
using ElementOutput = Element; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm50;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<64, 64, 4>;  // <- threadblock tile M = 64, N = 64, K = 4
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<32, 16, 4>;  // <- warp tile M = 32, N = 16, K = 4
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<1, 1, 1>; // <- MMA Op tile M = 1, N = 1, K = 1
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- Defaults
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
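                                                       //    Here ElementOutput is Quaternion<float>
                                                       //    (128 bits), so this evaluates to 1.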
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run(Options options) {
// PASS/FAIL status
bool passed = true;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
4,
-4,
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
4,
-4,
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
4,
-4,
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
  // Initialize alpha and beta for dot product computation from the command-line options
  ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha);
  ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta);
  // Split K dimension into 1 partition (i.e., no split-K)
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
if (options.reference_check) {
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue> gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
passed &= cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
}
if (passed) {
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main(int argc, char const** argv) {
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
printf("%d x %d x %d Single Precision Quaternion Matrix Multiply\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
return run(options);
}
| examples/21_quaternion_gemm/quaternion_gemm.cu/0 | {
"file_path": "examples/21_quaternion_gemm/quaternion_gemm.cu",
"repo_id": "examples",
"token_count": 6915
} | 10 |
################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import sys
print("This example is deprecated. Please see examples/python for examples of using "
"the CUTLASS Python interface.")
sys.exit(0)
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
import cutlass_bindings
from bfloat16 import bfloat16
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS GEMM kernels from Python: 'D = alpha * A * B + beta * C'")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help="This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM")
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
type=int, help="Memory alignement of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle", "BatchedIdentitySwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# Argument
parser.add_argument("-p", "--problem_size",
default=[128, 128, 128], nargs=3, type=int,
help="GEMM problem size M, N, K")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float,
help="Scaling factor of A * B")
parser.add_argument("-beta", "--beta", default=0.0, type=float,
help="Scaling factor of C")
parser.add_argument("-gm", "--gemm_mode", default="Gemm", type=str,
choices=["Gemm", "GemmSplitKParallel", "Batched", "Array"],
help="GEMM mode. Gemm is used for non-splitK or serial-splitK. \
GemmSplitKParallel is used for parallel splitK")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
parser.add_argument('-batch', '--batch', default=1, type=int, help="batch size for batched GEMM")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
raise Exception(("Parameter --compute-capability of {} "
"does not match that of the device of {}.").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
pycutlass.compiler.nvcc()
np.random.seed(0)
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
if (args.activation_function == "identity"
or (args.gemm_mode == "GemmSplitKParallel" and args.split_k_slices > 1)):
    # Use a plain (non-activated) epilogue: either no activation was requested, or parallel
    # split-K is used, in which case the activation is applied by the reduction epilogue below.
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
operation = GemmOperationUniversal(
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
if args.gemm_mode == "GemmSplitKParallel":
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
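    # For parallel split-K, each K-slice writes partial sums to a workspace; this separate
    # reduction kernel combines them and applies the epilogue (including any activation).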
pycutlass.compiler.add_module(operations)
# User-provide inputs
problem_size = cutlass_bindings.gemm.GemmCoord(
args.problem_size[0], args.problem_size[1], args.problem_size[2])
tensor_a_size = args.batch * problem_size.m() * problem_size.k()
if args.element_a != "int8":
if args.element_a == "bfloat16":
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(bfloat16)
else:
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(getattr(np, args.element_a))
else:
tensor_A = np.random.uniform(
low=-2, high=2,size=(tensor_a_size,)
).astype(getattr(np, args.element_a))
tensor_b_size = args.batch * problem_size.k() * problem_size.n()
if args.element_b != "int8":
if args.element_b == "bfloat16":
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(bfloat16)
else:
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(getattr(np, args.element_b))
else:
tensor_B = np.random.uniform(
low=-2, high=2, size=(tensor_b_size,)
).astype(getattr(np, args.element_b))
if args.element_c != "int8":
if args.bias:
if args.layout_c == "RowMajor":
tensor_c_size = args.batch * problem_size.n()
elif args.layout_c == "ColumnMajor":
tensor_c_size = args.batch * problem_size.m()
else:
raise ValueError(args.layout_c)
else:
tensor_c_size = args.batch * problem_size.m() * problem_size.n()
if args.element_c == "bfloat16":
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(bfloat16)
else:
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(getattr(np, args.element_c))
else:
tensor_C = np.random.uniform(
low=-2, high=2, size=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_D = np.zeros(
shape=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
arguments = GemmArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=output_op,
gemm_mode=getattr(cutlass_bindings.gemm.Mode, args.gemm_mode),
split_k_slices=args.split_k_slices, batch=args.batch
)
if args.gemm_mode == "GemmSplitKParallel":
reduction_arguments = ReductionArguments(
operation=reduction_operation,
problem_size=[problem_size.m(), problem_size.n()],
partitions=args.split_k_slices, workspace=arguments.ptr_D,
destination=tensor_D, source=tensor_C,
output_op=reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.gemm_mode == "GemmSplitKParallel":
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
# run the host reference module
reference = ReferenceModule(A, B, C)
tensor_D_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, args.alpha, args.beta, args.bias, args.batch)
tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
try:
assert np.array_equal(tensor_D, tensor_D_ref)
except:
assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5)
print("Passed.")
| examples/40_cutlass_py/customizable/gemm.py/0 | {
"file_path": "examples/40_cutlass_py/customizable/gemm.py",
"repo_id": "examples",
"token_count": 6052
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Attention Example.
This workload computes a fused multi head attention that supports variable sequence lengths.
Because it keeps the attention matrix in shared memory, it's both faster and
uses less global memory.
This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_,
and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_.
Algorithm:
In short, we can compute the output incrementally in blocks of size B,
we just need to divide the final result by the sum of all coefficients in
the softmax (which we compute incrementally) with the following pseudo-code:
```
    s_prime = torch.zeros([num_queries])
    O = torch.zeros([num_queries, head_size_v])
    for i in range(0, K.shape[0], B):
      si = exp((Q . K[i * B:(i+1) * B].t) * scale)
      s_prime += si.sum(-1)
      O += si . V[i * B:(i+1) * B]
    O = O / s_prime
```
In practice, and for numerical stability reasons,
    we also subtract the maximum so far (`mi`) before doing
the exponential. When we encounter new keys, the maximum
used to compute O so far (`m_prime`) can differ from the
current maximum, so we update O before accumulating with
```
O = O * exp(m_prime - mi)
m_prime = mi
```
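    For illustration, the two updates above can be combined into a single reference loop.
    The sketch below uses the same torch-style pseudo-code; the explicit initializations of
    `m_prime` and `s_prime` are written out here for clarity only:
    ```
    m_prime = torch.full([num_queries], -float("inf"))
    s_prime = torch.zeros([num_queries])
    O = torch.zeros([num_queries, head_size_v])
    for i in range(0, K.shape[0], B):
      si = (Q . K[i * B:(i+1) * B].t) * scale
      mi = max(m_prime, si.max(-1))
      pi = exp(si - mi)
      O = O * exp(m_prime - mi) + pi . V[i * B:(i+1) * B]
      s_prime = s_prime * exp(m_prime - mi) + pi.sum(-1)
      m_prime = mi
    O = O / s_prime
    ```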
Implementation details:
- `si` is stored in shared memory between the 2 back to back gemms
- we keep and accumulate the output
directly in registers if we can (`head_size_v <= 128`).
Otherwise, we store it & accumulate in global memory (slower)
- blocks are parallelized across the batch dimension, the number
of heads, and the query sequence size
Examples:
# Run an attention example with default setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_variable_seqlen
# Run an attention example with custom setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_variable_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true
Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers).
Using grouped GEMM to handle variable sequence lengths is inspired by an idea originally prototyped by ByteDance Inc.
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/fast_math.h"
#include "default_fmha_grouped.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool use_mask;
bool causal;
bool fixed_seq_length;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real;
int alignment;
int head_number;
int batch_size;
int head_size;
int head_size_v;
int seq_length;
int seq_length_kv;
int iterations;
int problem_count;
// alpha0, alpha1 and beta are fixed
// in this multi-head attention example
float alpha0;
float alpha1;
float beta;
cutlass::gemm::kernel::GroupScheduleMode scheduler_mode;
//
// Methods
//
Options():
help(false),
error(false),
alignment(1),
reference_check(true),
head_number(12),
batch_size(16),
head_size(64),
head_size_v(64),
seq_length(1024),
seq_length_kv(1024),
use_mask(false),
iterations(20),
causal(false),
fixed_seq_length(false),
problem_count(batch_size * head_number),
scheduler_mode(cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 1);
cmd.get_cmd_line_argument("head_number", head_number, 12);
cmd.get_cmd_line_argument("batch_size", batch_size, 16);
cmd.get_cmd_line_argument("head_size", head_size, 64);
cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size);
cmd.get_cmd_line_argument("seq_length", seq_length, 1024);
cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length);
cmd.get_cmd_line_argument("use_mask", use_mask, false);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("causal", causal, true);
cmd.get_cmd_line_argument("fixed_seq_length", fixed_seq_length, false);
std::vector<std::string> scheduler_mode_strs;
cmd.get_cmd_line_arguments("scheduler-mode", scheduler_mode_strs);
if (!scheduler_mode_strs.empty()) {
if (scheduler_mode_strs.size() > 1) {
std::cerr << "Only one scheduler mode may be passed in" << std::endl;
error = true;
return;
}
std::string scheduler_mode_str = scheduler_mode_strs[0];
if (scheduler_mode_str == "kDeviceOnly") {
scheduler_mode = cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly;
} else if (scheduler_mode_str == "kHostPrecompute") {
scheduler_mode = cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute;
} else {
std::cerr << "Unrecognized scheduler mode '" << scheduler_mode_str << "'" << std::endl;
error = true;
return;
}
}
if (fixed_seq_length) {
std::cout << "NOTE: Better performance is expected for fixed-sized sequence length from 41_fused_multi_head_attention_fixed_seqlen." << std::endl;
}
randomize_problems();
}
void randomize_problems() {
problem_count = head_number * batch_size;
problem_sizes0.reserve(problem_count);
problem_sizes1.reserve(problem_count);
// When using mask, the original inputs are not padded
// and we need to save these info.
if (use_mask) {
problem_sizes0_real.reserve(problem_count);
problem_sizes1_real.reserve(problem_count);
}
for (int i = 0; i < batch_size; ++i) {
// problems belonging to the same batch share the same seq len
int m_real, mkv_real;
if (fixed_seq_length) {
m_real = seq_length;
mkv_real = seq_length_kv;
} else {
m_real = (rand() % seq_length) + 1;
// Only randomize seq_length_kv if it was set to a different value than
// seq_length originally.
if (seq_length != seq_length_kv) {
mkv_real = (rand() % seq_length_kv) + 1;
} else {
mkv_real = m_real;
}
}
int m = (m_real + alignment - 1) / alignment * alignment;
int mkv = (mkv_real + alignment - 1) / alignment * alignment;
int k0 = head_size;
int k1 = head_size_v;
for (int j = 0; j < head_number; ++j) {
cutlass::gemm::GemmCoord problem0(m, mkv, k0);
cutlass::gemm::GemmCoord problem1(m, k1, mkv);
problem_sizes0.push_back(problem0);
problem_sizes1.push_back(problem1);
if (use_mask) {
cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0);
cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real);
problem_sizes0_real.push_back(problem0_real);
problem_sizes1_real.push_back(problem1_real);
}
}
}
}
void print_problems() {
std::cout << " Running " << batch_size << " batches, each with " << head_number << " heads of size " << head_size << ":" << std::endl;
for (int i = 0; i < batch_size; ++i) {
int idx = i * head_number;
std::cout << " [" << i << "] seq_length = " << problem_sizes0[idx].m() << " seq_length_kv = " << problem_sizes0[idx].n() << std::endl;
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "41_fused_multi_head_attention_variable_seqlen\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n"
<< " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n"
<< " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n"
<< " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n"
<< " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n"
<< " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n"
<< " --use_mask=<bool> If true, performs padding-like masking in softmax.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --causal=<bool> If true, uses causal masking.\n"
<< " --fixed_seq_length=<bool> If true, uses the same sequence length for each item in the batch.\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fops = int64_t();
for (size_t i = 0; i < problem_sizes0.size(); ++i) {
auto const& problem0 = problem_sizes0[i];
auto const& problem1 = problem_sizes1[i];
for (int row = 0; row < problem0.m(); ++row) {
int num_cols0 = problem0.n();
if (causal) {
num_cols0 = std::min(row + 1, num_cols0);
}
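        // With causal masking, query row 'row' only attends to the first (row + 1) keys,
        // so the per-row cost accounted below shrinks accordingly.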
// P <- Q . K_t
fops += 2 * num_cols0 * problem0.k();
// P <- exp(P - max(P))
fops += 2 * num_cols0;
// S <- sum(P)
fops += num_cols0 - 1;
// O <- P . V
fops += 2 * num_cols0 * problem1.n();
// O <- O / S
fops += num_cols0 * problem1.n();
}
}
return double(fops) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Attention>
class TestbedAttention {
public:
//
// Type definitions
//
using scalar_t = typename Attention::GemmKernel::scalar_t;
using accum_t = typename Attention::GemmKernel::accum_t;
using output_t = typename Attention::GemmKernel::output_t;
using output_accum_t = typename Attention::GemmKernel::output_accum_t;
using ElementQ = scalar_t;
using ElementK = scalar_t;
using ElementP = accum_t;
using ElementAccumulator = accum_t;
using ElementV = scalar_t;
using ElementO = output_t;
using ElementOAccum = output_accum_t;
using ElementCompute = accum_t;
using ElementNorm = accum_t;
using ElementSum = accum_t;
using ElementSoftmaxCompute = accum_t;
using LayoutQ = cutlass::layout::RowMajor;
using LayoutK = cutlass::layout::ColumnMajor;
using LayoutP = cutlass::layout::RowMajor;
using LayoutV = cutlass::layout::RowMajor;
using LayoutO = cutlass::layout::RowMajor;
using MatrixCoord = typename LayoutP::TensorCoord;
static bool const kNeedsOutputAccumulatorBuffer = Attention::GemmKernel::kNeedsOutputAccumulatorBuffer;
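  // When the output cannot be kept in registers (large head_size_v), the kernel accumulates
  // into a separate global-memory buffer (block_O_accumulate below).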
private:
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_Q;
cutlass::Distribution::Kind init_K;
cutlass::Distribution::Kind init_P;
cutlass::Distribution::Kind init_V;
cutlass::Distribution::Kind init_O;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real;
std::vector<int64_t> offset_Q;
std::vector<int64_t> offset_K;
std::vector<int64_t> offset_P;
std::vector<int64_t> offset_V;
std::vector<int64_t> offset_O;
std::vector<int64_t> ldq_host;
std::vector<int64_t> ldk_host;
std::vector<int64_t> ldp_host;
std::vector<int64_t> ldv_host;
std::vector<int64_t> ldo_host;
std::vector<int64_t> seqlen_host;
cutlass::DeviceAllocation<int64_t> ldq;
cutlass::DeviceAllocation<int64_t> ldk;
cutlass::DeviceAllocation<int64_t> ldp;
cutlass::DeviceAllocation<int64_t> ldv;
cutlass::DeviceAllocation<int64_t> ldo;
cutlass::DeviceAllocation<int64_t> seqlen;
cutlass::DeviceAllocation<ElementQ> block_Q;
cutlass::DeviceAllocation<ElementK> block_K;
cutlass::DeviceAllocation<ElementP> block_P;
cutlass::DeviceAllocation<ElementV> block_V;
cutlass::DeviceAllocation<ElementO> block_O;
cutlass::DeviceAllocation<ElementOAccum> block_O_accumulate;
cutlass::DeviceAllocation<ElementNorm> block_Norm;
cutlass::DeviceAllocation<ElementSum> block_Sum;
cutlass::DeviceAllocation<int64_t> offset_P_Device;
cutlass::DeviceAllocation<ElementQ *> ptr_Q;
cutlass::DeviceAllocation<ElementK *> ptr_K;
cutlass::DeviceAllocation<ElementP *> ptr_P;
cutlass::DeviceAllocation<ElementV *> ptr_V;
cutlass::DeviceAllocation<ElementO *> ptr_O;
cutlass::DeviceAllocation<ElementOAccum *> ptr_O_accumulate;
public:
//
// Methods
//
TestbedAttention(
Options &options_,
cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { }
int problem_count() const {
return (options.head_number * options.batch_size);
}
private:
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor_(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementP>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 8;
scope_min = -8;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Initializes data structures
void initialize_() {
//
    // Set scalars for the mha example
//
options.alpha0 = 1.0f / sqrt(float(options.head_size));
options.alpha1 = 1.0f;
options.beta = 0;
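    // alpha0 applies the usual 1/sqrt(head_size) softmax scaling to Q.K^T; the second GEMM
    // (P.V) and the source accumulation (beta) are left unscaled in this example.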
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_Q = 0;
int64_t total_elements_K = 0;
int64_t total_elements_P = 0;
int64_t total_elements_V = 0;
int64_t total_elements_O = 0;
ldq_host.resize(problem_count());
ldk_host.resize(problem_count());
ldp_host.resize(problem_count());
ldv_host.resize(problem_count());
ldo_host.resize(problem_count());
seqlen_host.resize(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
auto problem0 = options.problem_sizes0.at(i);
auto problem1 = options.problem_sizes1.at(i);
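      // Leading dimensions of the packed (densely strided) layouts for each operand of the
      // two GEMMs (Q.K^T and P.V).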
ldq_host.at(i) = LayoutQ::packed({problem0.m(), problem0.k()}).stride(0);
ldk_host.at(i) = LayoutK::packed({problem0.k(), problem0.n()}).stride(0);
ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0);
ldv_host.at(i) = LayoutV::packed({problem1.k(), problem1.n()}).stride(0);
ldo_host.at(i) = LayoutO::packed({problem1.m(), problem1.n()}).stride(0);
// m = n for attention problems.
seqlen_host.at(i) = problem0.m();
offset_Q.push_back(total_elements_Q);
offset_K.push_back(total_elements_K);
offset_P.push_back(total_elements_P);
offset_V.push_back(total_elements_V);
offset_O.push_back(total_elements_O);
int64_t elements_Q = problem0.m() * problem0.k();
int64_t elements_K = problem0.k() * problem0.n();
int64_t elements_P = problem0.m() * problem0.n();
int64_t elements_V = problem1.k() * problem1.n();
int64_t elements_O = problem1.m() * problem1.n();
total_elements_Q += elements_Q;
total_elements_K += elements_K;
total_elements_P += elements_P;
total_elements_V += elements_V;
total_elements_O += elements_O;
}
problem_sizes_device0.reset(problem_count());
problem_sizes_device1.reset(problem_count());
problem_sizes_device0.copy_from_host(options.problem_sizes0.data());
problem_sizes_device1.copy_from_host(options.problem_sizes1.data());
if (options.use_mask) {
problem_sizes_device0_real.reset(problem_count());
problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data());
}
ldq.reset(problem_count());
ldk.reset(problem_count());
ldp.reset(problem_count());
ldv.reset(problem_count());
ldo.reset(problem_count());
seqlen.reset(problem_count());
ldq.copy_from_host(ldq_host.data());
ldk.copy_from_host(ldk_host.data());
ldp.copy_from_host(ldp_host.data());
ldv.copy_from_host(ldv_host.data());
ldo.copy_from_host(ldo_host.data());
seqlen.copy_from_host(seqlen_host.data());
//
// Assign pointers
//
block_Q.reset(total_elements_Q);
block_K.reset(total_elements_K);
block_P.reset(total_elements_P);
block_V.reset(total_elements_V);
block_O.reset(total_elements_O);
if (kNeedsOutputAccumulatorBuffer) {
block_O_accumulate.reset(total_elements_O);
}
offset_P_Device.reset(problem_count());
// sync offset with device
cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size());
std::vector<ElementQ *> ptr_Q_host(problem_count());
std::vector<ElementK *> ptr_K_host(problem_count());
std::vector<ElementP *> ptr_P_host(problem_count());
std::vector<ElementV *> ptr_V_host(problem_count());
std::vector<ElementO *> ptr_O_host(problem_count());
std::vector<ElementOAccum *> ptr_O_accumulate_host(problem_count());
std::vector<ElementNorm *> ptr_norm_host(problem_count());
std::vector<ElementSum *> ptr_sum_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i);
ptr_K_host.at(i) = block_K.get() + offset_K.at(i);
ptr_P_host.at(i) = block_P.get() + offset_P.at(i);
ptr_V_host.at(i) = block_V.get() + offset_V.at(i);
ptr_O_host.at(i) = block_O.get() + offset_O.at(i);
if (kNeedsOutputAccumulatorBuffer) {
ptr_O_accumulate_host.at(i) = block_O_accumulate.get() + offset_O.at(i);
}
}
ptr_Q.reset(problem_count());
ptr_Q.copy_from_host(ptr_Q_host.data());
ptr_K.reset(problem_count());
ptr_K.copy_from_host(ptr_K_host.data());
ptr_P.reset(problem_count());
ptr_P.copy_from_host(ptr_P_host.data());
ptr_V.reset(problem_count());
ptr_V.copy_from_host(ptr_V_host.data());
ptr_O.reset(problem_count());
ptr_O.copy_from_host(ptr_O_host.data());
if (kNeedsOutputAccumulatorBuffer) {
ptr_O_accumulate.reset(problem_count());
ptr_O_accumulate.copy_from_host(ptr_O_accumulate_host.data());
}
//
// Initialize the problems of the workspace
//
initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1);
initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2);
initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3);
}
template<typename Element>
bool verify_tensor_(std::vector<Element> vector_Input, \
std::vector<Element> vector_Input_Ref,
int64_t verify_length = -1) {
int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size();
size = (verify_length == -1) ? size : verify_length;
// 0.05 for absolute error
float abs_tol = 5e-2f;
// 10% for relative error
float rel_tol = 1e-1f;
for (int64_t i = 0; i < size; ++i) {
float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i));
float abs_diff = fabs(diff);
float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f);
float relative_diff = abs_diff / abs_ref;
if ( (isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) {
printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i)));
return false;
}
}
return true;
}
  /// Verifies the attention output against a reference computation
bool verify_() {
bool passed = true;
for (int32_t i = 0; i < problem_count(); ++i) {
cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(i);
cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(i);
LayoutQ layout_Q(ldq_host.at(i));
LayoutK layout_K(ldk_host.at(i));
LayoutP layout_P(ldp_host.at(i));
LayoutV layout_V(ldv_host.at(i));
LayoutO layout_O(ldo_host.at(i));
MatrixCoord extent_Q{problem0.m(), problem0.k()};
MatrixCoord extent_K{problem0.k(), problem0.n()};
MatrixCoord extent_P{problem0.m(), problem0.n()};
MatrixCoord extent_V{problem1.k(), problem1.n()};
MatrixCoord extent_O{problem1.m(), problem1.n()};
cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q);
cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K);
cutlass::TensorView<ElementP, LayoutP> view_P(block_P.get() + offset_P.at(i), layout_P, extent_P);
cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V);
cutlass::DeviceAllocation<ElementP> block_Ref(layout_P.capacity(extent_P));
cutlass::TensorView<ElementP, LayoutP> view_Ref_device(block_Ref.get(), layout_P, extent_P);
cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O));
cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get(), layout_O, extent_O);
cutlass::reference::device::TensorFill(view_Ref_O_device, ElementO(0));
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementQ, LayoutQ,
ElementK, LayoutK,
ElementP, LayoutP,
ElementCompute, ElementAccumulator
>(
problem0,
ElementAccumulator(options.alpha0),
view_Q,
Attention::GemmKernel::MM0::Mma::kTransformA,
view_K,
Attention::GemmKernel::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_P,
view_Ref_device,
ElementAccumulator(0)
);
// Compute softmax for P. We need to explicitly compute softmax
// over P because softmax is fused to the second GEMM in the
// profiled implementation.
std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P));
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref.get(), matrix_Ref.size());
cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementNorm> vector_Norm_Ref(problem0.m());
std::vector<ElementSum> vector_Sum_Ref(problem0.m());
int n_dim = options.use_mask ? options.problem_sizes0_real.at(i).n() : problem0.n();
// Compute softmax for reference matrix
for (int m = 0; m < problem0.m(); m++) {
int n_dim_row = n_dim;
if (options.causal) {
n_dim_row = std::min(m + 1, n_dim);
}
ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0}));
for (int n = 1; n < n_dim_row; n++) {
max = std::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})));
}
vector_Norm_Ref.at(m) = ElementNorm(max);
ElementSoftmaxCompute sum = ElementSoftmaxCompute();
for (int n = 0; n < n_dim_row; n++) {
sum += std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max );
}
ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum);
vector_Sum_Ref.at(m) = ElementSum(inv_sum);
for (int n = 0; n < n_dim_row; n++) {
view_Ref_host.ref().at({m, n}) = ElementP(
std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum
);
}
// Mask out the rest of the attention matrix
for (int n = n_dim_row; n < n_dim; ++n) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
// when not using mask, problem_real and problem share the same sizes
if (options.use_mask) {
for (int m = 0; m < problem0.m(); m++) {
for (int n = n_dim; n < problem0.n(); n++) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
}
cutlass::device_memory::copy_to_device(block_P.get() + offset_P.at(i), matrix_Ref.data(), matrix_Ref.size());
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementP, LayoutP,
ElementV, LayoutV,
ElementO, LayoutO,
ElementCompute, ElementAccumulator
>(
problem1,
ElementAccumulator(options.alpha1),
view_P,
Attention::GemmKernel::MM0::Mma::kTransformA,
view_V,
Attention::GemmKernel::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_O_device,
view_Ref_O_device,
ElementAccumulator(0)
);
// Copy to host memory
cutlass::TensorView<ElementP, LayoutP> view_Ref(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementO> matrix_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size());
std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size());
      bool verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O);
passed = passed && verified_O;
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl;
if (!verified_O) {
std::cout << "Final matrix output is incorrect" << std::endl;
}
return passed;
}
}
return passed;
}
public:
/// Executes a CUTLASS Attention kernel and measures runtime.
Result profile() {
Result result;
result.passed = false;
int threadblock_count = Attention::sufficient(options.problem_sizes1.data(), options.problem_count);
// Early exit
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
result.passed = false;
// Initialize the problem
initialize_();
typename Attention::Arguments args(
problem_sizes_device0.get(),
problem_sizes_device1.get(),
options.problem_count,
threadblock_count,
ptr_Q.get(),
ptr_K.get(),
ptr_P.get(),
ptr_V.get(),
ptr_O.get(),
ptr_O_accumulate.get(),
ldq.get(),
ldk.get(),
ldp.get(),
ldv.get(),
ldo.get(),
options.causal,
options.alpha0,
options.problem_sizes1.data()
);
Attention fmha;
size_t workspace_size = fmha.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
result.status = fmha.initialize(args, workspace.get());
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to initialize CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
// Run the grouped FMHA object
result.status = fmha.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Verify correctness
//
result.passed = true;
if (options.reference_check) {
result.passed = verify_();
}
//
// Warm-up run of the grouped FMHA object
//
result.status = fmha.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
      return result;
}
}
// Record an event at the start of a series of FMHA operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < this->options.iterations; ++iter) {
fmha();
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
std::cout << std::endl;
std::cout << "CUTLASS Attention:\n"
<< "====================================================" << std::endl;
std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \
<< ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\
<< ", " << options.batch_size << "}." << std::endl;
options.print_problems();
std::cout << std::endl;
std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
int kQueriesPerBlock,
int kKeysPerBlock,
int kMaxK,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_
>
int run_grouped(Options& options) {
using AttentionKernel = typename cutlass::gemm::kernel::DefaultFMHAGrouped<
cutlass::half_t, // scalar_t
cutlass::arch::Sm80, // ArchTag
true, // Memory is aligned
kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
GroupScheduleMode_
>::FMHAKernel;
using FMHA = cutlass::gemm::device::GemmGrouped<AttentionKernel>;
//
// Test and profile
//
TestbedAttention<FMHA> testbed(options);
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS attention has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
std::cout << "\nPassed\n";
return 0;
}
template <
int kQueriesPerBlock,
int kKeysPerBlock,
int kMaxK
>
int run_attention(Options& options) {
if (options.scheduler_mode == cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly) {
return run_grouped<kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly>(options);
} else {
return run_grouped<kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>(options);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
if (options.use_mask) {
std::cerr << "--use_mask is not supported at the moment\n";
return -2;
}
if (options.alignment != 1) {
std::cerr << "--alignment=1 is the only supported value\n";
return -2;
}
// Determine kernel configuration based on head size.
// If head size is less than or equal to 64, each block operates over 64 queries and
// 64 keys, and partial results can be stored in the register file.
// If head size is greater than 64, each block operates over 32 queries and 128 keys,
// and partial results are stored in shared memory.
if (options.head_size_v > 64) {
static int const kQueriesPerBlock = 32;
static int const kKeysPerBlock = 128;
if (options.head_size_v <= kKeysPerBlock) {
return run_attention<kQueriesPerBlock, kKeysPerBlock, 128>(options);
} else {
return run_attention<kQueriesPerBlock, kKeysPerBlock, 65536>(options);
}
} else {
static constexpr int kMaxK = 64; // <- Decrease to 32/16 if your problem is smaller
static int const kQueriesPerBlock = 64;
static int const kKeysPerBlock = 64;
return run_attention<kQueriesPerBlock, kKeysPerBlock, kMaxK>(options);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fused_multihead_attention_variable_seqlen.cu/0 | {
"file_path": "examples/41_fused_multi_head_attention/fused_multihead_attention_variable_seqlen.cu",
"repo_id": "examples",
"token_count": 16103
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include <type_traits>
#include <vector>
#include <cuda_fp16.h>
#include <curand_kernel.h>
#ifdef HAS_PYTORCH
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/fast_math.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "debug_utils.h"
#include "gemm_kernel_utils.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/integer_subbyte.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/platform/platform.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "epilogue/epilogue_pipelined.h"
#include "iterators/epilogue_predicated_tile_iterator.h"
#include "gemm/custom_mma.h"
#include "gemm/find_default_mma.h"
#include "gemm/mma_accum_lambda_iterator.h"
#include "gemm/mma_from_smem.h"
#include "transform/tile_smem_loader.h"
#include <inttypes.h>
using namespace gemm_kernel_utils;
namespace {
template <typename FragmentType, int32_t kNumThreads>
struct GmemTile {
/*
Helper functions to efficiently store/load registers (RF) to/from gmem
GEMM accumulators have a particular format on A100, and
it takes some compute/shared-memory to rearrange them to
a RowMajor or ColumnMajor format in global memory through
an Epilogue. The same complexity applies when loading back into RF.
This class loads/stores RF fragments as they are, and can be used for
efficient accumulation across GEMMs, for instance:
```
GmemTile tile;
for (int i = 0; i < N; ++i) {
// ...
Fragment accum;
if (i == 0) {
accum.clear();
} else {
tile.load(accum, thread_id);
}
mma(accum, ...);
if (i < N-1) {
// Store for next GEMM
tile.store(accum, thread_id);
} else {
// Store in tensor (eg RowMajor)
epilogue(accum);
}
// ...
}
```
*/
// 128bits per thread
using AccessType = cutlass::Array<float, 4>;
static constexpr int32_t kBytes = sizeof(AccessType);
static constexpr int32_t kStride = kNumThreads * AccessType::kElements;
static constexpr int32_t kNumIters =
FragmentType::kElements / AccessType::kElements;
static constexpr int32_t kElementsStored =
kNumThreads * FragmentType::kElements;
static_assert(
FragmentType::kElements % AccessType::kElements == 0,
"fragment not aligned on 128 bits");
float* ptr;
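// Addressing sketch for load()/store() below: iteration `i` of thread `t`
// accesses the 4 floats at ptr[t * 4 + i * kStride] .. ptr[t * 4 + i * kStride + 3],
// so within an iteration consecutive threads touch consecutive 128-bit chunks
// (fully coalesced across the threadblock).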
CUTLASS_DEVICE void load(FragmentType& fragment, int thread_id) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kNumIters; ++i) {
AccessType* __restrict__ gmem_ptr = reinterpret_cast<AccessType*>(
ptr + thread_id * AccessType::kElements + i * kStride);
AccessType sub_fragment;
cutlass::arch::global_load<AccessType, kBytes>(
sub_fragment, gmem_ptr, true);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < AccessType::kElements; ++j) {
fragment[i * AccessType::kElements + j] = sub_fragment[j];
}
}
}
CUTLASS_DEVICE void store(FragmentType const& fragment, int thread_id) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kNumIters; ++i) {
AccessType* __restrict__ gmem_ptr = reinterpret_cast<AccessType*>(
ptr + thread_id * AccessType::kElements + i * kStride);
AccessType sub_fragment;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < AccessType::kElements; ++j) {
sub_fragment[j] = fragment[i * AccessType::kElements + j];
}
cutlass::arch::global_store<AccessType, kBytes>(
sub_fragment, gmem_ptr, true);
}
}
CUTLASS_DEVICE void storeAtomicAdd(
FragmentType const& fragment,
int thread_id) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kNumIters; ++i) {
float* gmem_ptr = ptr + thread_id * AccessType::kElements + i * kStride;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < AccessType::kElements; ++j) {
float val = fragment[i * AccessType::kElements + j];
float* ptr = gmem_ptr + j;
atomicAdd(ptr, val);
}
}
}
};
struct AtomicLock {
CUTLASS_DEVICE static void acquire(
int32_t* lock,
int set_val,
int thread_id) {
if (thread_id == 0) {
while (atomicCAS(lock, 0 /*cmp*/, set_val /*setval*/) != set_val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__nanosleep(40);
#endif
}
}
__syncthreads();
}
CUTLASS_DEVICE static void release(int32_t* lock, int thread_id) {
if (thread_id == 0) {
int status = 0;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
asm volatile("st.global.release.gpu.b32 [%0], %1;\n"
:
: "l"(lock), "r"(status));
#else
asm volatile("st.global.cg.b32 [%0], %1;\n" : : "l"(lock), "r"(status));
#endif
}
}
};
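// Illustrative usage of AtomicLock (a sketch only; `lock_ptr` is a hypothetical
// int32_t* in global memory, zero-initialized by the host before launch):
//   AtomicLock::acquire(lock_ptr, blockIdx.x + 1, thread_id);
//   // ... critical section: read-modify-write a gmem buffer shared
//   //     between threadblocks ...
//   __syncthreads();
//   AtomicLock::release(lock_ptr, thread_id);
// `set_val` must be non-zero so that a released lock (0) is distinguishable
// from a held one.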
template <typename scalar_t, typename Arch>
constexpr int getWarpsPerSmBw() {
bool is_half = !cutlass::platform::is_same<scalar_t, float>::value;
if (Arch::kMinComputeCapability >= 80) {
return is_half ? 12 : 8;
}
return 8;
}
} // namespace
template <
// which arch we target (eg `cutlass::arch::Sm80`)
typename ArchTag_,
// input/output type
typename scalar_t_,
// run optimized kernel because memory accesses will be aligned
bool kIsAligned_,
// use dropout if enabled
bool kApplyDropout_,
// when doing a GEMM, preload the next one (uses more shmem)
bool kPreload_,
// block dimensions
int kBlockSizeI_,
int kBlockSizeJ_,
// upper bound on `max(value.shape[-1], query.shape[-1])`
int kMaxK_ = (int)cutlass::platform::numeric_limits<uint32_t>::max(),
// assumes that `cu_seqlen` is None, and
// (1) `num_queries % kBlockSizeI == 0`
// (2) `num_keys % kBlockSizeJ == 0`
bool kKeysQueriesAlignedToBlockSize_ = false,
// Allows parallelizing across keys
bool kEnableSplitKeys_ = true>
struct AttentionBackwardKernel {
enum CustomMaskType {
NoCustomMask = 0,
CausalFromTopLeft = 1,
CausalFromBottomRight = 2,
NumCustomMaskTypes,
};
using scalar_t = scalar_t_;
using output_t = scalar_t;
using output_accum_t = float;
using lse_scalar_t = float;
using accum_t = float;
using ArchTag = ArchTag_;
static constexpr bool kIsAligned = kIsAligned_;
static constexpr bool kApplyDropout = kApplyDropout_;
static constexpr bool kPreload = kPreload_;
static constexpr int kBlockSizeI = kBlockSizeI_;
static constexpr int kBlockSizeJ = kBlockSizeJ_;
static constexpr int kMaxK = kMaxK_;
static constexpr bool kKeysQueriesAlignedToBlockSize =
kKeysQueriesAlignedToBlockSize_;
static constexpr int64_t kWarpSize = 32;
// If this is true, we store and accumulate dK/dV in RF
// rather than going back to gmem every time
static constexpr bool kIsHalf = cutlass::sizeof_bits<scalar_t>::value <= 16;
static constexpr bool kOutputInRF = kIsHalf && kMaxK <= kBlockSizeI;
static_assert(
!kPreload ||
(kIsHalf && ArchTag::kMinComputeCapability >= 80 && kOutputInRF),
"preload MMA not supported");
static constexpr bool kPrologueQK = kPreload;
static constexpr bool kPrologueGV = kPreload;
static constexpr bool kPrologueDOV = kPreload;
static constexpr bool kPrologueGQ = kPreload;
static constexpr bool kPrologueGK = kPreload;
static constexpr int64_t kNumWarpsPerBlock =
(kBlockSizeI * kBlockSizeJ) / (32 * 32);
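// e.g. kBlockSizeI = kBlockSizeJ = 64 gives (64 * 64) / (32 * 32) = 4 warps,
// i.e. 128 threads per threadblock; kBlockSizeI = 64, kBlockSizeJ = 128 gives 8 warps.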
// Compute delta for the f16 kernels
// TODO: Figure out why it's slower on the f32 kernels
// (something due to RF pressure?)
// TODO: Remove condition on `kOutputInRF` - this is needed to work
// around a compiler bug on V100, not exactly sure why but I spent
// too much time on this already. Reproducible with
// (B, Mq, Mkv, K) = (1, 1, 1, 136) for instance
static constexpr bool kKernelComputesDelta =
kIsHalf && (kOutputInRF || ArchTag::kMinComputeCapability != 70);
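// `delta` is the row-wise reduction used throughout the backward pass:
//   delta[q] = sum_k dO[q, k] * O[q, k]   (i.e. (dO * out).sum(-1))
// When kKernelComputesDelta is false, the caller is expected to provide it
// via `p.delta_ptr`.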
// Launch bounds
static constexpr int64_t kNumThreads = kWarpSize * kNumWarpsPerBlock;
static constexpr int64_t kMinBlocksPerSm =
getWarpsPerSmBw<scalar_t, ArchTag>() / kNumWarpsPerBlock;
using GemmType = DefaultGemmType<ArchTag, scalar_t>;
using DefaultConfig =
typename cutlass::gemm::device::DefaultGemmConfiguration<
typename GemmType::OpClass,
ArchTag,
scalar_t,
scalar_t,
scalar_t, // ElementC
accum_t // ElementAccumulator
>;
static constexpr auto kOptimalAlignement = cutlass::platform::max(
DefaultConfig::kAlignmentA,
DefaultConfig::kAlignmentB);
static constexpr auto kMinimumAlignment = GemmType::kMinimumAlignment;
struct MatmulQK {
/*
attn_T = k_j @ q_i.transpose(-2, -1) # matmul
attn_T = (attn_T - logsumexp[i_start:i_end].unsqueeze(1).transpose(-2,
-1)).exp() # epilogue
with attn_T.shape = (kBlockSizeJ, kBlockSizeI)
*/
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using DefaultMma = typename cutlass::gemm::threadblock::DefaultMma<
scalar_t, // ElementA
cutlass::layout::RowMajor, // LayoutA
kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment,
scalar_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
accum_t, // ElementC
cutlass::layout::RowMajor, // LayoutC
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
DefaultConfig::kStages,
typename GemmType::Operator,
false, // AccumulatorsInRowMajor = false,
cutlass::gemm::SharedMemoryClearOption::kNone>;
using MmaCore = typename DefaultMma::MmaCore;
using Mma =
typename MakeCustomMma<typename DefaultMma::ThreadblockMma, kMaxK>::Mma;
// used for efficient load of bias tile (Bij) from global memory to shared
// memory
using BiasLoader = TileSmemLoader<
scalar_t,
// Bij is applied to transposed attn matrix tile (Pij.T). Bij is loaded
// row-major but needs to have transposed shape so we get the same
// elements.
cutlass::MatrixShape<ThreadblockShape::kN, ThreadblockShape::kM>,
MmaCore::kThreads,
// input restriction: kv_len has to be a multiple of this value
128 / cutlass::sizeof_bits<scalar_t>::value>;
// Epilogue to store to shared-memory in a format that we can use later for
// the second matmul
using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm<
typename Mma::Operator::IteratorC,
typename Mma::Operator,
scalar_t,
WarpShape,
ThreadblockShape>;
using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<
typename Mma::Operator::IteratorC,
accum_t,
kWarpSize>::Iterator;
using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
};
struct MatmulGradV {
/*
grad_v[j_start:j_end] += attn_T @ do_i # matmul
Dimensions: (kBlockSizeJ * kNumWarpsPerBlock, kBlockSizeI, K)
(we might need to iterate multiple times on K)
*/
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA,
cutlass::layout::RowMajor, // LayoutA,
DefaultConfig::kAlignmentA,
scalar_t, // ElementB,
cutlass::layout::RowMajor, // LayoutB,
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
output_t,
cutlass::layout::RowMajor, // LayoutC,
accum_t,
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
typename DefaultConfig::EpilogueOutputOp,
void, // ThreadblockSwizzle - not used
DefaultConfig::kStages,
false, // SplitKSerial
typename GemmType::Operator>;
// if dropout:
// for computing dVj += (Pij.T * Zij) @ dOi
// Pij_dropped.T = Pij.T * Zij is computed on the fly as fragments of
// Pij.T are loaded in. The reason we do it this way is because Pij.T and
// Zij are reused in later steps, while Pij_dropped.T is only needed in
// this step. computing Pij_dropped.T on the fly allows us to avoid
// keeping all 3 of Pij_dropped.T, Pij.T, and Zij in shared memory at the
// same time.
// if no dropout:
// for computing dVj += Pij.T @ dOi
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Operator::Shape, // WarpShape
typename DefaultGemm::Mma::Operator::
InstructionShape, // InstructionShape
typename DefaultGemm::Mma::Operator::
IteratorA, // RegularWarpIterator
typename DefaultGemm::Mma::Policy // Policy
>::WarpIterator;
using DefaultMmaFromSmem =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulQK::AccumulatorSharedStorage::Shape::kN,
WarpIteratorA,
kApplyDropout>; // kScaleOperandA
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
// Epilogue
using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
typename DefaultEpilogue::OutputTileIterator>::Iterator;
using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
};
struct MatmulDOIVJ {
/*
doi_t_vj = do_i @ v_j.transpose(-2, -1) # matmul
tmp = (doi_t_vj - Di.unsqueeze(1)) * attn # inplace / epilogue?
*/
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeI, kBlockSizeJ, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using ElementC = output_t;
using ElementAccum = accum_t;
// no-op output op - epilogue just stores result to global memory
using BiasGradEpilogueOutputOp =
typename cutlass::epilogue::thread::LinearCombination<
ElementC,
DefaultConfig::EpilogueOutputOp::kCount,
typename DefaultConfig::EpilogueOutputOp::ElementAccumulator,
typename DefaultConfig::EpilogueOutputOp::ElementCompute,
cutlass::epilogue::thread::ScaleType::Nothing>;
using DefaultGemm = typename cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA
cutlass::layout::RowMajor, // LayoutA
kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment,
scalar_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
ElementC, // ElementC
cutlass::layout::RowMajor, // LayoutC
ElementAccum, // ElementAccumulator
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
BiasGradEpilogueOutputOp, // EpilogueOutputOp
void, // ThreadblockSwizzle (not used)
// multiple preloads, dropout Zij tile, and 3 stages push us over shared
// memory capacity on A100. set a ceiling on number of stages to save
// shared memory if dropout is in use.
kPreload && kApplyDropout && (kBlockSizeI * kBlockSizeJ > 64 * 64)
? cutlass::const_min(2, DefaultConfig::kStages)
: DefaultConfig::kStages, // Stages
false, // SplitKSerial
typename GemmType::Operator,
cutlass::gemm::SharedMemoryClearOption::kNone>;
using Mma = typename MakeCustomMma<typename DefaultGemm::Mma, kMaxK>::Mma;
using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<
typename Mma::Operator::IteratorC,
ElementAccum,
kWarpSize>::Iterator;
// epilogue used to write bias gradient, which is just the output of this
// matmul with some operations applied to the fragment
using BiasGradEpilogue = typename DefaultGemm::Epilogue;
// Epilogue to store to shared-memory in a format that we can use later for
// the second matmul
using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm<
typename DefaultGemm::Mma::Operator::IteratorC,
typename DefaultGemm::Mma::Operator,
scalar_t,
WarpShape,
ThreadblockShape>;
using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
};
struct MatmulGradQ {
// grad_q <- tmp @ k_j
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeI, kBlockSizeJ, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA,
cutlass::layout::RowMajor, // LayoutA,
DefaultConfig::kAlignmentA,
scalar_t, // ElementB,
cutlass::layout::RowMajor, // LayoutB,
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
output_t,
cutlass::layout::RowMajor, // LayoutC,
accum_t,
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
typename DefaultConfig::EpilogueOutputOp,
void, // ThreadblockSwizzle - not used
DefaultConfig::kStages,
false, // SplitKSerial
typename GemmType::Operator>;
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Operator::Shape,
typename DefaultGemm::Mma::Operator::InstructionShape,
typename DefaultGemm::Mma::Operator::IteratorA,
typename DefaultGemm::Mma::Policy>::WarpIterator;
using DefaultMmaFromSmem =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulDOIVJ::AccumulatorSharedStorage::Shape::kN,
WarpIteratorA,
false>; // kScaleOperandA
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
// Epilogue
using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
typename DefaultEpilogue::OutputTileIterator>::Iterator;
using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
};
struct MatmulGradK {
// grad_k <- tmp.transpose(-2, -1) @ q_i
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA,
cutlass::layout::RowMajor, // LayoutA,
DefaultConfig::kAlignmentA,
scalar_t, // ElementB,
cutlass::layout::RowMajor, // LayoutB,
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
output_t,
cutlass::layout::RowMajor, // LayoutC,
accum_t,
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
typename DefaultConfig::EpilogueOutputOp,
void, // ThreadblockSwizzle - not used
DefaultConfig::kStages,
false, // SplitKSerial
typename GemmType::Operator>;
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Operator::Shape,
typename DefaultGemm::Mma::Operator::InstructionShape,
typename DefaultGemm::Mma::Operator::IteratorA,
typename DefaultGemm::Mma::Policy>::WarpIterator;
using DefaultMmaFromSmemN =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulQK::AccumulatorSharedStorage::Shape::kN, // kMaxK
WarpIteratorA,
false>; // kScaleOperandA
using DefaultMmaFromSmemT =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulDOIVJ::AccumulatorSharedStorage::Shape::kM, // kMaxK
WarpIteratorA,
false, // kScaleOperandA
kPreload>; // kTransposeA
using DefaultMmaFromSmem = typename cutlass::platform::conditional<
DefaultMmaFromSmemT::kIsTransposedA,
DefaultMmaFromSmemT,
DefaultMmaFromSmemN>::type;
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
// Epilogue
using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
typename DefaultEpilogue::OutputTileIterator>::Iterator;
using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
};
static constexpr bool kEnableSplitKeys = kEnableSplitKeys_;
static constexpr bool kNeedsAccumGradQ = kEnableSplitKeys ||
!cutlass::platform::is_same<output_accum_t, output_t>::value;
static constexpr bool kNeedsAccumGradK = !kOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
static constexpr bool kNeedsAccumGradV = !kOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
struct GradQTempStorage {
int32_t lock;
int32_t counter;
int32_t pad[2]; // pad to 128bits
output_accum_t buffer[MatmulGradQ::AccumTileGmem::kElementsStored];
};
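// lock/counter/pad form a 128-bit-aligned header in front of `buffer`, which
// holds one GradQ accumulator tile per threadblock
// (AccumTileGmem::kElementsStored floats). Presumably (intent only, not
// verified here): with split-keys enabled, threadblocks handling different key
// splits of the same query tile serialize on `lock` (via AtomicLock above)
// while accumulating into `buffer`, and `counter` lets the last contributor
// write the final dQ tile out.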
struct Params {
// Input tensors
scalar_t* query_ptr = nullptr; // [Mq, nH, K]
scalar_t* key_ptr = nullptr; // [Mk, nH, K]
scalar_t* value_ptr = nullptr; // [Mk, nH, Kv]
scalar_t* bias_ptr = nullptr;
lse_scalar_t* logsumexp_ptr = nullptr; // [nH, Mq]
scalar_t* output_ptr = nullptr; // [Mq, nH, Kv]
scalar_t* grad_output_ptr = nullptr; // [Mq, nH, Kv]
accum_t* delta_ptr = nullptr; // [nH, Mq]
int32_t* cu_seqlens_q_ptr = nullptr;
int32_t* cu_seqlens_k_ptr = nullptr;
// Output tensors
output_t* grad_query_ptr = nullptr; // [Mq, nH, K]
output_t* grad_key_ptr = nullptr; // [Mk, nH, K]
output_t* grad_value_ptr = nullptr; // [Mk, nH, Kv]
output_t* grad_bias_ptr = nullptr;
// Accumulators
output_accum_t* workspace = nullptr; // [Mq, Kq] + [Mkv, Kq] + [Mkv, Kv]
output_accum_t* workspace_gv =
nullptr; // (will be calculated by the kernel)
GradQTempStorage* workspace_gq =
nullptr; // (will be calculated by the kernel)
// Scale
accum_t scale = 1.0f;
// Dimensions/strides
int32_t head_dim = -1;
int32_t head_dim_value = -1;
int32_t num_queries = -1;
int32_t num_keys = -1;
int32_t num_heads = -1;
uint8_t custom_mask_type = NoCustomMask;
int32_t q_strideM = -1;
int32_t k_strideM = -1;
int32_t v_strideM = -1;
int32_t bias_strideM = 0;
int32_t gO_strideM = -1;
int32_t gB_strideM = -1;
int8_t gQKV_strideM_multiplier = 1; // 3 for packed, 1 otherwise
#ifdef HAS_PYTORCH
// dropout
at::PhiloxCudaState rng_engine_inputs = {0, 0};
#endif
// RNG sequence offset based on batch_id and head_id
unsigned long long dropout_batch_head_rng_offset = 0;
float dropout_prob = 0.0f;
CUTLASS_HOST_DEVICE int32_t o_strideM() const {
return head_dim_value * num_heads;
}
CUTLASS_HOST_DEVICE int32_t gQ_strideM() const {
return gQKV_strideM_multiplier * num_heads * head_dim;
}
CUTLASS_HOST_DEVICE int32_t gK_strideM() const {
return gQKV_strideM_multiplier * num_heads * head_dim;
}
CUTLASS_HOST_DEVICE int32_t gV_strideM() const {
return gQKV_strideM_multiplier * num_heads * head_dim_value;
}
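// Example: if dQ/dK/dV are written into one packed buffer (e.g. shaped
// [M, 3, nH, K] per batch), consecutive rows of dQ are 3 * nH * K elements
// apart, hence gQKV_strideM_multiplier == 3; with separate output tensors the
// multiplier is 1 and gQ_strideM() == nH * K.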
// Everything below is only used in `advance_to_block`
// and shouldn't use registers
int64_t o_strideH = -1;
int32_t q_strideH = -1;
int32_t k_strideH = -1;
int32_t v_strideH = -1;
int64_t bias_strideH = 0;
int64_t o_strideB = -1;
int64_t q_strideB = -1;
int64_t k_strideB = -1;
int64_t v_strideB = -1;
int64_t bias_strideB = 0;
int64_t lse_strideB = -1;
int64_t lse_strideH = -1;
int64_t delta_strideB = -1;
int64_t delta_strideH = -1;
int32_t num_batches = -1;
int16_t num_splits_key = 1; // We use `gridDim.x` inside kernel
int64_t gO_strideB = 0;
int64_t gQ_strideB = 0;
int64_t gK_strideB = 0;
int64_t gV_strideB = 0;
int64_t gB_strideB = 0;
int64_t gO_strideH = 0;
int64_t gQ_strideH = 0;
int64_t gK_strideH = 0;
int64_t gV_strideH = 0;
int64_t gB_strideH = 0;
CUTLASS_DEVICE int16_t num_splits_key_device() const {
return kEnableSplitKeys ? gridDim.x : 1;
}
CUTLASS_DEVICE int16_t split_key_device() const {
return kEnableSplitKeys ? blockIdx.x : 0;
}
CUTLASS_DEVICE bool advance_to_block() {
int64_t batch_id = blockIdx.z;
int32_t head_id = blockIdx.y;
if (kNeedsAccumGradQ || kNeedsAccumGradK || kNeedsAccumGradV) {
assert(workspace_size() == 0 || workspace != nullptr);
workspace += (batch_id * num_heads + head_id) * workspace_strideBH();
workspace = warp_uniform(workspace);
workspace_gv = workspace + workspace_elements_gk();
workspace_gq =
(GradQTempStorage*)(workspace_gv + workspace_elements_gv());
if (kEnableSplitKeys) {
workspace_gv += workspace_elements_gv() * split_key_device() /
num_splits_key_device();
workspace += workspace_elements_gk() * split_key_device() /
num_splits_key_device();
}
} else {
workspace = nullptr;
}
// Advance pointers that depend on the total concatenated
// number of queries, as `num_queries` is modified in the block
// below
dropout_batch_head_rng_offset =
batch_id * (num_heads * num_queries * num_keys) +
head_id * (num_queries * num_keys);
logsumexp_ptr += batch_id * lse_strideB + head_id * lse_strideH;
if (cu_seqlens_q_ptr != nullptr) {
assert(cu_seqlens_k_ptr != nullptr);
cu_seqlens_q_ptr += batch_id;
cu_seqlens_k_ptr += batch_id;
int32_t q_start = cu_seqlens_q_ptr[0];
int32_t k_start = cu_seqlens_k_ptr[0];
int64_t q_next_start = cu_seqlens_q_ptr[1];
int64_t k_next_start = cu_seqlens_k_ptr[1];
assert(q_next_start - q_start <= num_queries);
assert(k_next_start - k_start <= num_keys);
num_queries = q_next_start - q_start;
num_keys = k_next_start - k_start;
// Jump manually
batch_id = 0;
query_ptr += q_start * q_strideM;
key_ptr += k_start * k_strideM;
value_ptr += k_start * v_strideM;
assert(bias_ptr == nullptr);
assert(grad_bias_ptr == nullptr);
output_ptr += q_start * o_strideM();
grad_output_ptr += q_start * gO_strideM;
delta_ptr += q_start;
grad_query_ptr += q_start * gQ_strideM();
grad_key_ptr += k_start * gK_strideM();
grad_value_ptr += k_start * gV_strideM();
}
query_ptr += batch_id * q_strideB + head_id * q_strideH;
key_ptr += batch_id * k_strideB + head_id * k_strideH;
value_ptr += batch_id * v_strideB + head_id * v_strideH;
if (bias_ptr != nullptr) {
bias_ptr += batch_id * bias_strideB + head_id * bias_strideH;
}
output_ptr += batch_id * o_strideB + head_id * o_strideH;
grad_output_ptr += batch_id * gO_strideB + head_id * gO_strideH;
delta_ptr += batch_id * delta_strideB + head_id * delta_strideH;
grad_query_ptr += batch_id * gQ_strideB + head_id * gQ_strideH;
grad_key_ptr += batch_id * gK_strideB + head_id * gK_strideH;
grad_value_ptr += batch_id * gV_strideB + head_id * gV_strideH;
if (grad_bias_ptr != nullptr) {
grad_bias_ptr += batch_id * gB_strideB + head_id * gB_strideH;
}
// Some values are modified above
// Signal to the compiler that they are the same in all threads
// and can be stored in warp-uniform registers (Sm75+)
num_queries = warp_uniform(num_queries);
num_keys = warp_uniform(num_keys);
custom_mask_type = warp_uniform(custom_mask_type);
query_ptr = warp_uniform(query_ptr);
key_ptr = warp_uniform(key_ptr);
value_ptr = warp_uniform(value_ptr);
bias_ptr = warp_uniform(bias_ptr);
logsumexp_ptr = warp_uniform(logsumexp_ptr);
output_ptr = warp_uniform(output_ptr);
grad_output_ptr = warp_uniform(grad_output_ptr);
delta_ptr = warp_uniform(delta_ptr);
grad_query_ptr = warp_uniform(grad_query_ptr);
grad_key_ptr = warp_uniform(grad_key_ptr);
grad_value_ptr = warp_uniform(grad_value_ptr);
grad_bias_ptr = warp_uniform(grad_bias_ptr);
#if 0
PRINT_T0("[b:%d h:%d] dp[0]:%f Q:%f K:%f V:%f LSE:%f",
int(blockIdx.z), int(blockIdx.y),
float(delta_ptr[0]),
float(query_ptr[0]), float(key_ptr[0]), float(value_ptr[0]),
float(logsumexp_ptr[0])
)
#endif
return true;
}
__host__ dim3 getBlocksGrid() const {
return dim3(num_splits_key, num_heads, num_batches);
}
__host__ dim3 getThreadsGrid() const {
return dim3(kWarpSize * kNumWarpsPerBlock, 1, 1);
}
CUTLASS_HOST_DEVICE int64_t workspace_elements_gk() const {
if (!kNeedsAccumGradK) {
return 0;
}
return num_splits_key * align_up(num_keys, (int32_t)kBlockSizeJ) *
align_up(head_dim, (int32_t)kBlockSizeI);
}
CUTLASS_HOST_DEVICE int64_t workspace_elements_gv() const {
if (!kNeedsAccumGradV) {
return 0;
}
return num_splits_key * align_up(num_keys, (int32_t)kBlockSizeJ) *
align_up(head_dim_value, (int32_t)kBlockSizeI);
}
CUTLASS_HOST_DEVICE int64_t workspace_elements_gq() const {
if (!kNeedsAccumGradQ) {
return 0;
}
int num_blocks = ceil_div(num_queries, kBlockSizeI);
int num_cols = ceil_div(head_dim, MatmulGradQ::ThreadblockShape::kN);
return num_blocks * num_cols * sizeof(GradQTempStorage) /
sizeof(output_accum_t);
}
CUTLASS_HOST_DEVICE int64_t workspace_strideBH() const {
// Aligned on 128bits
return align_up(
workspace_elements_gk() + workspace_elements_gv() +
workspace_elements_gq(),
int64_t(4));
}
CUTLASS_HOST_DEVICE int64_t workspace_size() const {
// Returns size of buffer we need to run this kernel
return num_batches * num_heads * workspace_strideBH() * sizeof(float);
}
CUTLASS_HOST_DEVICE bool should_zero_workspace() const {
return num_splits_key > 1;
}
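// Host-side allocation sketch (illustrative only; `p` is an assumed,
// fully-populated Params instance):
//   float* ws = nullptr;
//   if (p.workspace_size() > 0) {
//     cudaMalloc(&ws, p.workspace_size());
//     if (p.should_zero_workspace()) {
//       cudaMemset(ws, 0, p.workspace_size());
//     }
//   }
//   p.workspace = ws;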
};
// shared storage for keeping Zij matrix. not needed if we aren't using
// dropout, in which case we use an empty array to save shared memory
using ZijSharedStorage = typename cutlass::platform::conditional<
kApplyDropout,
typename MatmulQK::AccumulatorSharedStorage,
// dummy shared storage object that takes up no space.
typename cutlass::gemm::threadblock::AccumulatorSharedStorage<
#ifdef _WIN32
// windows builds throw the error:
// "type containing an unknown-size array is not allowed"
// if we try to make Zij shared storage zero-sized.
// To get around this just make it sized 1 on windows.
typename cutlass::gemm::GemmShape<1, 1, 0>,
#else
typename cutlass::gemm::GemmShape<0, 0, 0>,
#endif
typename MatmulQK::AccumulatorSharedStorage::Element,
typename MatmulQK::AccumulatorSharedStorage::Layout,
typename cutlass::MatrixShape<0, 0>>>::type;
struct SharedStoragePrologue {
struct {
cutlass::Array<accum_t, kBlockSizeI> di; // (do_i * o_i).sum(-1)
typename MatmulQK::Mma::SharedStorageA mm_qk_k;
} persistent;
union {
struct {
// part1 - after Q.K / dV / dO.V
union {
// 1. efficient load of bias tile Bij, which is then applied to Pij
typename MatmulQK::BiasLoader::SmemTile bias;
// 4. store Pij. it is needed:
// - in dVj += (Pij.T * Zij) @ dOi
// - in dSij = Pij * (dPij - Di)
// 6. dVj += (Pij.T * Zij) @ dOi
// 10. write to fragment
typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
};
// 5. store Zij. it is needed in dVj += (Pij.T * Zij) @ dOi
ZijSharedStorage zij;
union {
// 2. prologue for dVj
// 6. workspace for dVj += (Pij.T * Zij) @ dOi
typename MatmulGradV::Mma::SharedStorage mm_gradV;
// 7. dVj epilogue
typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue;
};
// 3. prologue for dPij_dropped
// 8. used in dPij_dropped = dOi @ Vj.T
typename MatmulDOIVJ::Mma::SharedStorage mm_doivj;
} part1;
struct {
// part2 - dQ
union {
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part1)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
};
typename MatmulGradK::Mma::SharedStorage mm_gradK; // (preload)
typename MatmulGradQ::Mma::SharedStorage mm_gradQ; // (preload)
union {
// store dB = dSij to global memory
typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue;
typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue;
};
} part2;
struct {
// part3 - after last iteration on dQ's epilogue / dK
union {
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part1)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
};
typename MatmulGradK::Mma::SharedStorage mm_gradK; // (preload)
typename MatmulGradQ::DefaultEpilogue::SharedStorage
gradQ_epilogue_lastIter;
typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue;
} part3;
struct {
// part4 - after last iteration on dK's epilogue / preload next K.Q_t
typename MatmulQK::Mma::SharedStorageB mm_qk_q;
// If we reach end of current key, dump RF->gmem with "final" epilogues
typename MatmulGradK::DefaultEpilogue::SharedStorage
gradK_epilogue_final;
typename MatmulGradV::DefaultEpilogue::SharedStorage
gradV_epilogue_final;
} part4;
};
static void print_size() {
// Field size
#define FSZ(f) int((sizeof(((SharedStoragePrologue*)0)->f)))
printf("Total smem: %d bytes\n", int(sizeof(SharedStoragePrologue)));
printf(" persistent: %db\n", FSZ(persistent));
printf(" mm_qk_k: %db\n", FSZ(persistent.mm_qk_k));
printf(" part1: %db\n", FSZ(part1));
printf(" bias: %db\n", FSZ(part1.bias));
printf(" attn_shared_storage: %db\n", FSZ(part1.attn_shared_storage));
printf(" zij: %db\n", FSZ(part1.zij));
printf(" mm_gradV: %db\n", FSZ(part1.mm_gradV));
printf(" gradV_epilogue: %db\n", FSZ(part1.gradV_epilogue));
printf(" mm_doivj: %db\n", FSZ(part1.mm_doivj));
printf(" part2: %db\n", FSZ(part2));
printf(" tmpT_shared_storage: %db\n", FSZ(part2.tmpT_shared_storage));
printf(" tmp_shared_storage: %db\n", FSZ(part2.tmp_shared_storage));
printf(" mm_gradK: %db\n", FSZ(part2.mm_gradK));
printf(" mm_gradQ: %db\n", FSZ(part2.mm_gradQ));
printf(" gradB_epilogue: %db\n", FSZ(part2.gradB_epilogue));
printf(" gradQ_epilogue: %db\n", FSZ(part2.gradQ_epilogue));
printf(" part3: %db\n", FSZ(part3));
printf(" tmpT_shared_storage: %db\n", FSZ(part3.tmpT_shared_storage));
printf(" part4: %db\n", FSZ(part4));
printf(" mm_qk_q: %db\n", FSZ(part4.mm_qk_q));
printf(
" gradK_epilogue_final: %db\n", FSZ(part4.gradK_epilogue_final));
printf(
" gradV_epilogue_final: %db\n", FSZ(part4.gradV_epilogue_final));
}
// ===========================================
#define FIELD(INSIDE_STRUCT, FIELDNAME) \
CUTLASS_DEVICE auto& FIELDNAME() { \
return INSIDE_STRUCT.FIELDNAME; \
}
FIELD(persistent, di)
FIELD(persistent, mm_qk_k)
FIELD(part1, bias)
FIELD(part1, attn_shared_storage)
FIELD(part1, zij)
FIELD(part1, mm_gradV)
FIELD(part1, gradV_epilogue)
FIELD(part1, mm_doivj)
FIELD(part2, mm_gradK)
FIELD(part2, mm_gradQ)
FIELD(part2, gradB_epilogue)
FIELD(part2, gradQ_epilogue)
FIELD(part2, tmp_shared_storage)
FIELD(part3, tmpT_shared_storage)
FIELD(part3, gradQ_epilogue_lastIter)
FIELD(part3, gradK_epilogue)
FIELD(part4, mm_qk_q)
FIELD(part4, gradK_epilogue_final)
FIELD(part4, gradV_epilogue_final)
};
struct SharedStorageNoPrologue {
struct {
cutlass::Array<accum_t, kBlockSizeI> di; // (do_i * o_i).sum(-1)
} persistent;
union {
struct {
// part1 - Q.K matmul
typename MatmulQK::Mma::SharedStorageA mm_qk_k;
typename MatmulQK::Mma::SharedStorageB mm_qk_q;
} part1;
struct {
// part2 - compute gradV
union {
// 1. efficient load of bias tile Bij, which is then applied to Pij
typename MatmulQK::BiasLoader::SmemTile bias;
// 2. store Pij to shared memory. it is needed:
// - in this step, where it is used in dVj += (Pij.T * Zij) @ dOi
// - in next step where it is used in dSij = Pij * (dPij - Di)
typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
};
// 3. store Zij. it is needed in this step, where it is used
// to compute Pij_dropped = Pij * Zij on the fly as fragments of Pij are
// loaded for the computation of dVj.
ZijSharedStorage zij;
union {
typename MatmulGradV::Mma::SharedStorage mm_gradV;
typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue;
};
} part2;
struct {
// part3 - DO.V matmul
union {
// first compute dPij = (dOi @ Vj.T) * Zij
// and dSij = Pij * (dPij - Di)
struct {
// (from part2) - Pij for computing dSij = Pij * (dPij - Di)
typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
// matmul to compute dOiVj
typename MatmulDOIVJ::Mma::SharedStorage mm_doivj;
};
// then store dB = dSij to global memory
typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue;
};
} part3;
struct {
// part4 - compute gradQ
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part2)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
union {
typename MatmulGradQ::Mma::SharedStorage mm_gradQ;
typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue;
typename MatmulGradQ::DefaultEpilogue::SharedStorage
gradQ_epilogue_lastIter;
};
} part4;
struct {
// part5 - compute gradK
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part2)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
union {
typename MatmulGradK::Mma::SharedStorage mm_gradK;
typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue;
};
} part5;
struct {
// part6 - store RF accumulated into gmem
typename MatmulGradK::DefaultEpilogue::SharedStorage
gradK_epilogue_final;
typename MatmulGradV::DefaultEpilogue::SharedStorage
gradV_epilogue_final;
} part6;
};
static void print_size() {
#define FIELD_SIZEOF(f) int((sizeof(((SharedStorageNoPrologue*)0)->f)))
printf("Total smem: %d bytes\n", int(sizeof(SharedStorageNoPrologue)));
printf(" persistent: %db\n", FIELD_SIZEOF(persistent));
printf(" part1: %db\n", FIELD_SIZEOF(part1));
printf(" part2: %db\n", FIELD_SIZEOF(part2));
printf(" part3: %db\n", FIELD_SIZEOF(part3));
printf(" part4: %db\n", FIELD_SIZEOF(part4));
printf(" part5: %db\n", FIELD_SIZEOF(part5));
printf(" part6: %db\n", FIELD_SIZEOF(part6));
}
// ===========================================
#define FIELD(INSIDE_STRUCT, FIELDNAME) \
CUTLASS_DEVICE auto& FIELDNAME() { \
return INSIDE_STRUCT.FIELDNAME; \
}
FIELD(persistent, di)
FIELD(part1, mm_qk_k)
FIELD(part1, mm_qk_q)
FIELD(part2, bias)
FIELD(part2, attn_shared_storage)
FIELD(part2, zij)
FIELD(part2, mm_gradV)
FIELD(part2, gradV_epilogue)
FIELD(part3, mm_doivj)
FIELD(part3, gradB_epilogue)
FIELD(part4, tmpT_shared_storage)
FIELD(part4, tmp_shared_storage)
FIELD(part4, mm_gradQ)
FIELD(part4, gradQ_epilogue)
FIELD(part4, gradQ_epilogue_lastIter)
FIELD(part5, mm_gradK)
FIELD(part5, gradK_epilogue)
FIELD(part6, gradK_epilogue_final)
FIELD(part6, gradV_epilogue_final)
};
using SharedStorage = typename cutlass::platform::conditional<
kPreload,
SharedStoragePrologue,
SharedStorageNoPrologue>::type;
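// Note: attention_kernel() below reads its shared storage from dynamic shared
// memory, so the kernel entry point (defined elsewhere in this example) must be
// launched with sizeof(SharedStorage) bytes of it. Illustrative launch sketch,
// where `kernel_fn` and `stream` are assumptions:
//   int smem_bytes = int(sizeof(SharedStorage));
//   if (smem_bytes > 48 * 1024) {
//     cudaFuncSetAttribute(
//         kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
//   }
//   kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);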
struct OutputFragments {
typename MatmulGradV::Mma::FragmentC gradV;
typename MatmulGradK::Mma::FragmentC gradK;
CUTLASS_DEVICE void clear() {
gradV.clear();
gradK.clear();
}
};
static bool __host__ check_supported(Params const& p) {
CHECK_ALIGNED_PTR(p.query_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.key_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.value_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.output_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.grad_output_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.bias_ptr, kMinimumAlignment);
XFORMERS_CHECK(p.lse_strideH % 8 == 0, "LSE is not correctly aligned");
XFORMERS_CHECK(p.lse_strideB % 8 == 0, "LSE is not correctly aligned");
XFORMERS_CHECK(
p.num_heads <= 1 || p.q_strideH % kMinimumAlignment == 0,
"query is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.k_strideH % kMinimumAlignment == 0,
"key is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.v_strideH % kMinimumAlignment == 0,
"value is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.num_batches <= 1 || p.q_strideB % kMinimumAlignment == 0,
"query is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_batches <= 1 || p.k_strideB % kMinimumAlignment == 0,
"key is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_batches <= 1 || p.v_strideB % kMinimumAlignment == 0,
"value is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.q_strideM % kMinimumAlignment == 0,
"query is not correctly aligned (strideM)");
XFORMERS_CHECK(
p.k_strideM % kMinimumAlignment == 0,
"key is not correctly aligned (strideM)");
XFORMERS_CHECK(
p.v_strideM % kMinimumAlignment == 0,
"value is not correctly aligned (strideM)");
if (p.bias_ptr) {
XFORMERS_CHECK(
p.num_batches <= 1 || p.bias_strideB % kMinimumAlignment == 0,
"attn_bias is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.bias_strideH % kMinimumAlignment == 0,
"attn_bias is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.bias_strideM % kMinimumAlignment == 0,
"attn_bias is not correctly aligned (strideM)");
}
if (p.grad_bias_ptr) {
XFORMERS_CHECK(
p.num_batches <= 1 || p.gB_strideB % kMinimumAlignment == 0,
"attn_bias.grad is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.gB_strideH % kMinimumAlignment == 0,
"attn_bias.grad is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.gB_strideM % kMinimumAlignment == 0,
"attn_bias.grad is not correctly aligned (strideM)");
}
XFORMERS_CHECK(
!(p.cu_seqlens_q_ptr && p.bias_ptr),
"CuSeqlen + bias not implemented yet");
XFORMERS_CHECK(
p.custom_mask_type < NumCustomMaskTypes,
"Invalid value for `custom_mask_type`");
XFORMERS_CHECK(
p.dropout_prob <= 1.0f && p.dropout_prob >= 0.0f,
"Invalid value for `dropout_prob`");
XFORMERS_CHECK(
kApplyDropout || p.dropout_prob == 0.0f,
"Set `kApplyDropout`=True to support `dropout_prob > 0`");
XFORMERS_CHECK(p.head_dim > 0, "Invalid value for `head_dim`");
XFORMERS_CHECK(p.head_dim_value > 0, "Invalid value for `head_dim_value`");
XFORMERS_CHECK(p.num_queries > 0, "Invalid value for `num_queries`");
XFORMERS_CHECK(p.num_keys > 0, "Invalid value for `num_keys`");
XFORMERS_CHECK(p.num_heads > 0, "Invalid value for `num_heads`");
XFORMERS_CHECK(p.num_batches > 0, "Invalid value for `num_batches`");
XFORMERS_CHECK(p.head_dim <= kMaxK, "kMaxK: Expected `head_dim <= kMaxK`");
XFORMERS_CHECK(
p.head_dim_value <= kMaxK, "kMaxK: Expected `head_dim_value <= kMaxK`");
if (kKeysQueriesAlignedToBlockSize) {
XFORMERS_CHECK(
p.cu_seqlens_k_ptr == nullptr,
"This kernel does not support cu_seqlen");
XFORMERS_CHECK(
p.cu_seqlens_q_ptr == nullptr,
"This kernel does not support cu_seqlen");
XFORMERS_CHECK(
p.num_queries % kBlockSizeI == 0,
"kKeysQueriesAlignedToBlockSize condition not respected");
XFORMERS_CHECK(
p.num_keys % kBlockSizeJ == 0,
"kKeysQueriesAlignedToBlockSize condition not respected");
}
XFORMERS_CHECK(
kEnableSplitKeys || p.num_splits_key == 1, "SplitKeys is disabled");
XFORMERS_CHECK(
p.num_splits_key > 0, "Invalid `num_splits_key` (expected >0)");
XFORMERS_CHECK(
p.num_splits_key <= cutlass::ceil_div(p.num_keys, kBlockSizeJ),
"Invalid `num_splits_key` (too large)");
return true;
}
static CUTLASS_DEVICE void attention_kernel(Params p) {
extern __shared__ char smem_buffer[];
SharedStorage& shared_storage = *((SharedStorage*)smem_buffer);
uint16_t thread_id = threadIdx.x;
uint8_t warp_id = warp_uniform(thread_id / 32);
uint8_t lane_id = thread_id % 32;
int32_t key_start = p.split_key_device() * kBlockSizeJ;
if (key_start >= p.num_keys) {
return;
}
if (kPrologueQK) {
int32_t query_start = getQueryStart(p, key_start);
prologueQkNextIteration<true>(
shared_storage, p, query_start, key_start, warp_id, lane_id);
}
// Computes (dO*out).sum(-1) and writes it to `p.delta_ptr`
if (kKernelComputesDelta) {
constexpr int kOptimalElements =
128 / cutlass::sizeof_bits<scalar_t>::value;
if (p.head_dim_value % kOptimalElements == 0) {
for (int query_start = 0; query_start < p.num_queries;
query_start += kBlockSizeI) {
computeDelta<kOptimalElements>(p, query_start, warp_id, lane_id);
}
} else {
for (int query_start = 0; query_start < p.num_queries;
query_start += kBlockSizeI) {
computeDelta<1>(p, query_start, warp_id, lane_id);
}
}
__syncthreads();
}
OutputFragments output_frags;
curandStatePhilox4_32_10_t rng_state_init;
#ifdef HAS_PYTORCH
if (kApplyDropout) {
auto seeds = at::cuda::philox::unpack(p.rng_engine_inputs);
// each element of the attention matrix P with shape
// (batch_sz, n_heads, n_queries, n_keys) is associated with a single
// offset in RNG sequence. we initialize the RNG state with offset that
// starts at the beginning of a (n_queries, n_keys) matrix for this
// block's batch_id and head_id
// initializing rng state is very expensive, so we run once per kernel,
// rather than once per iteration. each iteration takes a copy of the
// initialized RNG state and offsets it as needed.
curand_init(
std::get<0>(seeds),
0,
std::get<1>(seeds) + p.dropout_batch_head_rng_offset,
&rng_state_init);
}
#endif
CUTLASS_PRAGMA_UNROLL
for (; key_start < p.num_keys;
key_start += p.num_splits_key_device() * kBlockSizeJ) {
output_frags.clear();
CUTLASS_PRAGMA_UNROLL
for (int32_t query_start_shifted = getQueryStart(p, key_start);
query_start_shifted < getQueryStartShift(p) + getQueryEnd(p);
query_start_shifted += kBlockSizeI) {
// This line here
// vvvvvvvvvvvvvv
warp_id = warp_uniform(warp_id);
// ^^^^^^^^^^^^^^
// ... makes everything use less RF and be 10% faster. Why?
// I don't know. My theory is that it forces `nvcc` to
// re-compute indices, offsets etc... and not keep them
// from the previous iteration, which prevents MASSIVE
// register spilling.
int32_t query_start = query_start_shifted;
if (query_start >= p.num_queries) {
query_start = query_start % getQueryEnd(p);
}
processBlockIJ<kKeysQueriesAlignedToBlockSize>(
shared_storage,
output_frags,
p,
query_start,
key_start,
rng_state_init,
warp_id,
lane_id);
}
if (kOutputInRF) {
writeFragsToGmem<kKeysQueriesAlignedToBlockSize>(
shared_storage, output_frags, p, key_start, warp_id, lane_id);
} else if (getQueryStart(p, key_start) >= p.num_queries) {
zfillGradKV<kKeysQueriesAlignedToBlockSize>(
p, key_start, warp_id, lane_id);
}
__syncthreads();
}
}
template <bool skipBoundsChecks>
static CUTLASS_DEVICE void zfillGradKV(
Params const& p,
int32_t key_start,
uint8_t warp_id,
uint8_t lane_id) {
constexpr int kThreadsPerKey = 8;
constexpr int kParallelKeys = kNumThreads / kThreadsPerKey;
static_assert(kBlockSizeJ % kParallelKeys == 0, "");
// This function is not really optimized, but should rarely be used
// It's only used when some keys are "useless" and don't attend to
// any query, due to causal masking
int thread_id = 32 * warp_id + lane_id;
int k_shift = lane_id % kThreadsPerKey;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kBlockSizeJ; j += kParallelKeys) {
int key = key_start + j + (thread_id / kThreadsPerKey);
if (!skipBoundsChecks && key >= p.num_keys) {
continue;
}
auto gv_ptr = p.grad_value_ptr + key * p.gV_strideM();
auto gk_ptr = p.grad_key_ptr + key * p.gK_strideM();
for (int k = k_shift; k < p.head_dim_value; k += kThreadsPerKey) {
gv_ptr[k] = scalar_t(0);
}
for (int k = k_shift; k < p.head_dim; k += kThreadsPerKey) {
gk_ptr[k] = scalar_t(0);
}
}
}
template <bool skipBoundsChecks>
static CUTLASS_DEVICE void processBlockIJ(
SharedStorage& shared_storage,
OutputFragments& output_frags,
Params& p,
int32_t query_start,
int32_t key_start,
const curandStatePhilox4_32_10_t& curand_state_init,
uint8_t warp_id,
uint8_t lane_id) {
cutlass::Array<cutlass::uint1b_t, MatmulDOIVJ::Mma::FragmentC::kElements>
dropout_keep_mask_doivj;
dropout_keep_mask_doivj.fill(cutlass::uint1b_t{1});
const float dropout_scale =
kApplyDropout ? 1.0 / (1.0 - p.dropout_prob) : 1.0f;
cutlass::MatrixCoord no_offset{0, 0};
accum_t scale = p.scale;
int16_t thread_id = 32 * warp_id + lane_id;
auto rematerializeThreadIds = [&]() {
// Prevents `nvcc` from keeping values deduced from
// `thread_id`, `warp_id`, ... in RF - to reduce register pressure
warp_id = warp_uniform(thread_id / 32);
lane_id = thread_id % 32;
thread_id = 32 * warp_id + lane_id;
};
bool isFirstQuery = (query_start == getQueryStart(p, key_start));
int32_t next_query, next_key;
incrIteration(p, query_start, key_start, next_query, next_key);
bool isLastQuery = next_key != key_start;
accum_t di_rf = accum_t(0);
if (thread_id < kBlockSizeI) {
if (query_start + thread_id < p.num_queries) {
di_rf = p.delta_ptr[query_start + thread_id];
}
shared_storage.di()[thread_id] = di_rf;
}
int32_t num_queries_in_block = skipBoundsChecks
? MatmulQK::Mma::Shape::kN
: warp_uniform(cutlass::fast_min(
(int32_t)MatmulQK::Mma::Shape::kN, p.num_queries - query_start));
int32_t num_keys_in_block = skipBoundsChecks
? MatmulQK::Mma::Shape::kM
: warp_uniform(cutlass::fast_min(
(int32_t)MatmulQK::Mma::Shape::kM, p.num_keys - key_start));
auto prologueGradV = [&](int col) {
typename MatmulGradV::Mma::IteratorB iterator_dO(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM + col,
{num_queries_in_block, p.head_dim_value - col},
thread_id,
no_offset);
MatmulGradV::Mma::prologue(
shared_storage.mm_gradV(),
iterator_dO,
thread_id,
num_queries_in_block);
};
auto prologueGradQ = [&](int col) {
typename MatmulGradQ::Mma::IteratorB iterator_K(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM + col,
{num_keys_in_block, p.head_dim - col},
thread_id,
no_offset);
MatmulGradQ::Mma::prologue(
shared_storage.mm_gradQ(), iterator_K, thread_id, num_keys_in_block);
};
auto prologueGradK = [&](int col) {
typename MatmulGradK::Mma::IteratorB iterator_Q(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM + col,
{num_queries_in_block, p.head_dim - col},
thread_id,
no_offset);
MatmulGradK::Mma::prologue(
shared_storage.mm_gradK(),
iterator_Q,
thread_id,
num_queries_in_block);
};
auto prologueDOV = [&]() {
typename MatmulDOIVJ::Mma::IteratorA iterator_A(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM,
{num_queries_in_block, p.head_dim_value},
thread_id,
no_offset);
typename MatmulDOIVJ::Mma::IteratorB iterator_B(
{int32_t(p.v_strideM)},
p.value_ptr + key_start * p.v_strideM,
{p.head_dim_value, num_keys_in_block},
thread_id,
no_offset);
MatmulDOIVJ::Mma::prologue(
shared_storage.mm_doivj(),
iterator_A,
iterator_B,
thread_id,
p.head_dim_value);
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// MatmulQK
/////////////////////////////////////////////////////////////////////////////////////////////////
{
using Mma = typename MatmulQK::Mma;
cutlass::gemm::GemmCoord problem_size(
num_keys_in_block,
num_queries_in_block,
p.head_dim // k
);
// k_j
typename Mma::IteratorA iterator_A(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM,
{problem_size.m(), problem_size.k()},
thread_id,
no_offset);
// q_i.transpose(-2, -1)
typename Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM,
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
Mma mma(
shared_storage.mm_qk_k(),
shared_storage.mm_qk_q(),
thread_id,
warp_id,
lane_id);
typename Mma::FragmentC accum;
accum.clear();
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma.set_prologue_done(kPrologueQK);
mma.set_zero_outside_bounds(!skipBoundsChecks);
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
accum = cutlass::multiplies<typename Mma::FragmentC>()(scale, accum);
// Epilogue: add LSE + exp and store that to our shared memory buffer
// shmem <- (matmul_result -
// logsumexp[i_start:i_end].unsqueeze(1)).exp()
int warp_idx_mn_0 =
warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
auto output_tile_coords = cutlass::MatrixCoord{
warp_idx_mn_0 % Mma::Base::WarpCount::kM,
warp_idx_mn_0 / Mma::Base::WarpCount::kM};
// apply bias if applicable
if (p.bias_ptr != nullptr) {
// load bias tile Bij into shared memory
typename MatmulQK::BiasLoader::GmemTileIterator bias_iter(
{cutlass::layout::RowMajor(p.bias_strideM)},
p.bias_ptr + query_start * p.bias_strideM + key_start,
{num_queries_in_block, num_keys_in_block},
thread_id);
cutlass::TensorRef<scalar_t, cutlass::layout::RowMajor> bias_tensor_ref(
shared_storage.bias().data(),
cutlass::layout::RowMajor(MatmulQK::ThreadblockShape::kM));
typename MatmulQK::BiasLoader::SmemTileIterator smem_tile_iter(
bias_tensor_ref, thread_id);
MatmulQK::BiasLoader::load(bias_iter, smem_tile_iter);
// Pij += Bij, where Pij is in register fragment and Bij is in shmem
auto lane_offset = MatmulQK::AccumLambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords);
MatmulQK::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_n) {},
[&](int accum_m, int accum_n, int idx) {
// remember we are transposed
accum[idx] += bias_tensor_ref.at({accum_n, accum_m});
},
[&](int accum_n) {});
}
// Apply mask
if (p.custom_mask_type == CausalFromTopLeft ||
p.custom_mask_type == CausalFromBottomRight) {
auto lane_offset = MatmulQK::AccumLambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords);
int shift = query_start - key_start;
if (p.custom_mask_type == CausalFromBottomRight) {
shift += p.num_keys - p.num_queries;
}
// current_key = key_start + accum_m
// current_query = query_start + accum_n
// mask if: `current_key > current_query`
MatmulQK::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (accum_m > accum_n + shift) {
accum[idx] =
-cutlass::platform::numeric_limits<accum_t>::infinity();
}
},
[&](int accum_m) {});
}
__syncthreads();
if (kPrologueGV) {
prologueGradV(0);
}
if (kPrologueDOV) {
prologueDOV();
}
MatmulQK::B2bGemm::accumApplyLSEToSmem(
shared_storage.attn_shared_storage(),
accum,
p.logsumexp_ptr + query_start,
problem_size.n(),
thread_id,
warp_id,
lane_id,
output_tile_coords);
#if 0
auto accum_ref_attnT = shared_storage.attn_shared_storage().accum_ref();
PRINT_TENSOR4x4_T0_L0("attn_T", accum_ref_attnT);
#endif
// if we are using dropout, compute Zij, writing it to shared memory.
// each element of Zij is:
// - 0 with probability dropout_p
// - 1 / (1 - dropout_p) with probability 1 - dropout_p
if (kApplyDropout) {
auto zij = shared_storage.zij().accum_ref();
// each thread generates a contiguous sequence of elements in Zij, all
// in the same row. the reason they have to come from the same row is
// that sampling random numbers from a contiguous random number sequence
// is much more efficient than jumping around, and the linear offset of
// each element of Z (the global matrix) maps to an offset in a random
// number sequence. for Z, the end of a row and the beginning of the
// next have adjacent offsets, but for Zij (tile of global matrix), this
// is not necessarily the case.
// We must fill the entire `zij` shmem with values (even out of bounds
// on the K-dimension) otherwise we can get NaNs during the GEMM
const int kQueriesPerBlock = kBlockSizeI;
const int threads_per_row = cutlass::fast_min(
int32_t(kNumThreads / kQueriesPerBlock), num_keys_in_block);
const int elts_per_thread = cutlass::round_nearest(
cutlass::ceil_div(num_keys_in_block, threads_per_row), 4);
const int thread_i = thread_id / threads_per_row;
const int thread_start_j =
(thread_id % threads_per_row) * elts_per_thread;
if (thread_i < kQueriesPerBlock && thread_start_j < num_keys_in_block) {
curandStatePhilox4_32_10_t curand_state = curand_state_init;
skipahead(
(query_start + thread_i) * p.num_keys +
(key_start + thread_start_j),
&curand_state);
// generate elements of Zij, 4 elements at a time
for (int zij_start_col_idx = thread_start_j; zij_start_col_idx <
cutlass::fast_min<int32_t>(thread_start_j + elts_per_thread,
num_keys_in_block);
zij_start_col_idx += 4) {
const float4 rand_uniform_quad = curand_uniform4(&curand_state);
CUTLASS_PRAGMA_UNROLL
for (int quad_idx = 0; quad_idx < 4; ++quad_idx) {
// we'll write Zij transposed since attention is also transposed
// during the matmul to compute dV.
zij.at({zij_start_col_idx + quad_idx /*k*/, thread_i /*q*/}) =
(&rand_uniform_quad.x)[quad_idx] > p.dropout_prob
? scalar_t(dropout_scale)
: scalar_t(0);
}
}
}
__syncthreads();
#if 0
PRINT_TENSOR4x4_T0_L0("zij", zij);
PRINT_TENSOR4x4_T0_L0_START("zij", zij, kBlockSizeJ - 4, kBlockSizeI - 4);
#endif
// Save mask for later DOIVJ matmul
int warp_idx_mn_0 = warp_id %
(MatmulDOIVJ::Mma::Base::WarpCount::kM *
MatmulDOIVJ::Mma::Base::WarpCount::kN);
auto output_tile_coords_doivj = cutlass::MatrixCoord{
warp_idx_mn_0 % MatmulDOIVJ::Mma::Base::WarpCount::kM,
warp_idx_mn_0 / MatmulDOIVJ::Mma::Base::WarpCount::kM};
auto lane_offset = MatmulDOIVJ::AccumLambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords_doivj);
MatmulDOIVJ::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m /*q*/, int accum_n /*k*/, int idx) {
if (zij.at({accum_n, accum_m}) == scalar_t(0)) {
dropout_keep_mask_doivj[idx] = cutlass::uint1b_t{0};
}
},
[&](int accum_m) {});
}
__syncthreads();
}
rematerializeThreadIds();
/////////////////////////////////////////////////////////////////////////////////////////////////
// GradV matmul
//
// grad_v[j_start:j_end] += attn_T @ do_i
/////////////////////////////////////////////////////////////////////////////////////////////////
constexpr bool kSingleIterationGradV =
kMaxK <= MatmulGradV::ThreadblockShape::kN;
for (int col = 0; col < (kSingleIterationGradV ? 1 : p.head_dim_value);
col += MatmulGradV::ThreadblockShape::kN) {
using Mma = typename MatmulGradV::Mma;
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
cutlass::gemm::GemmCoord problem_size(
num_keys_in_block, p.head_dim_value - col, num_queries_in_block);
auto createEpilogueIter = [&]() {
return typename MatmulGradV::OutputTileIterator(
typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()},
p.grad_value_ptr + key_start * p.gV_strideM() + col,
{num_keys_in_block, p.head_dim_value - col},
thread_id);
};
typename Mma::IteratorB iterator_B(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM + col,
{num_queries_in_block, p.head_dim_value - col},
thread_id,
no_offset);
// if dropout: dVj += (Pij.T * Zij) @ dOi
// otherwise: dVj += Pij.T @ dOi
Mma mma(
// operand A: Pij.T
shared_storage.attn_shared_storage().accum_ref(),
// operand A_scale Zij.T:
// if we're using dropout, operand A is Pij_dropped.T = Pij.T * Zij.T
// which is computed on the fly as fragments of Pij.T are loaded in
shared_storage.zij().accum_ref(),
// operand B: dOi - which was loaded into shared memory previously
// when we computed dVj
shared_storage.mm_gradV().operand_B_ref(),
thread_id,
warp_id,
lane_id);
int storage_id = col / MatmulGradV::ThreadblockShape::kN;
AccumTileGmem gmem_tile{
p.workspace_gv + storage_id * AccumTileGmem::kElementsStored};
if (!kOutputInRF) {
if (isFirstQuery || !kNeedsAccumGradV) {
output_frags.gradV.clear();
} else {
gmem_tile.load(output_frags.gradV, thread_id);
}
}
mma.set_prologue_done(kPrologueGV);
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
__syncthreads();
mma(gemm_k_iterations,
output_frags.gradV,
iterator_B,
output_frags.gradV);
__syncthreads();
if (kPrologueGV && !kSingleIterationGradV &&
col + MatmulGradV::ThreadblockShape::kN < p.head_dim_value) {
prologueGradV(col + MatmulGradV::ThreadblockShape::kN);
}
if (!kOutputInRF) {
if (kNeedsAccumGradV && !isLastQuery) {
gmem_tile.store(output_frags.gradV, thread_id);
} else {
accumulateInGmem<MatmulGradV>(
shared_storage.gradV_epilogue(),
output_frags.gradV,
createEpilogueIter(),
isFirstQuery || kNeedsAccumGradV,
warp_id,
lane_id);
}
}
}
__syncthreads();
/////////////////////////////////////////////////////////////////////////////////////////////////
// MatmulDOIVJ
/////////////////////////////////////////////////////////////////////////////////////////////////
{
using Mma = typename MatmulDOIVJ::Mma;
// do_i
typename Mma::IteratorA iterator_A(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM,
{num_queries_in_block, p.head_dim_value},
thread_id,
no_offset);
// v_j.transpose(-2, -1)
typename Mma::IteratorB iterator_B(
{int32_t(p.v_strideM)},
p.value_ptr + key_start * p.v_strideM,
{p.head_dim_value, num_keys_in_block},
thread_id,
no_offset);
Mma mma(shared_storage.mm_doivj(), thread_id, warp_id, lane_id);
mma.set_prologue_done(kPrologueDOV);
mma.set_zero_outside_bounds(!skipBoundsChecks);
typename Mma::FragmentC accum;
accum.clear();
auto gemm_k_iterations =
(p.head_dim_value + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
__syncthreads();
if (kPrologueGQ) {
prologueGradQ(0);
}
if (kPrologueGK) {
prologueGradK(0);
}
int warp_idx_mn_0 =
warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
auto output_tile_coords = cutlass::MatrixCoord{
warp_idx_mn_0 % Mma::Base::WarpCount::kM,
warp_idx_mn_0 / Mma::Base::WarpCount::kM};
// TODO: This must be terribly inefficient. There must be a better way
// tmp [RF] <- (accum [RF] - Di [smem] ) * attn_T.T [smem]
// attn_shared_storage [smem] <- tmp.T
// tmp_shared_storage [smem] <- tmp
{
using LambdaIterator = typename MatmulDOIVJ::AccumLambdaIterator;
auto lane_offset = LambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords);
// if dropout was used, compute dPij = dPij_dropped * Zij
if (kApplyDropout) {
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (dropout_keep_mask_doivj[idx].get()) {
accum[idx] *= dropout_scale;
} else {
accum[idx] = 0;
}
},
[&](int accum_m) {});
}
auto attn_T = shared_storage.attn_shared_storage().accum_ref();
#if 0
PRINT_B0_T0("doivj_dropped");
print_warp_accum<LambdaIterator>(accum, lane_offset, 4, 4);
PRINT_TENSOR4x4_T0_L0("attn_T", attn_T)
#endif
accum_t current_di;
// dSij = (dPij - Di) * Pij
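      // Softmax backward recap: with Pij = softmax(Sij) and dPij = dOi @ Vj.T,
      // dSij = Pij * (dPij - sum_k dPik * Pik). That inner sum equals
      // Di = rowsum(dOi * Oi), which is precomputed once per query row
      // (see computeDelta below), so only (dPij - Di) * Pij remains here.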
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { current_di = shared_storage.di()[accum_m]; },
[&](int accum_m, int accum_n, int idx) {
            // TODO: Otherwise we can get NaNs, since the accumulator
            // may contain infs here (only observed with f16 so far)
if (skipBoundsChecks ||
(accum_m < num_queries_in_block &&
accum_n < num_keys_in_block)) {
accum_t attn = attn_T.at({accum_n, accum_m});
accum[idx] = (accum[idx] - current_di) * attn;
} else {
accum[idx] = 0;
}
},
[&](int accum_m) {
});
// store bias gradient tile dBij to global memory,
// where dBij = dSij = Pij * (dPij - Di)
if (p.grad_bias_ptr != nullptr) {
typename MatmulDOIVJ::BiasGradEpilogue::OutputTileIterator
output_iter(
typename MatmulDOIVJ::BiasGradEpilogue::OutputTileIterator::
Params{p.gB_strideM},
                // grad_bias_ptr is offset to point at the beginning of the
                // matrix of shape (queries, keys) for a given
                // (batch_id, head_id); the pointer arithmetic here produces
                // a pointer to the start of the current tile within that
                // matrix
p.grad_bias_ptr + query_start * p.gB_strideM + key_start,
{num_queries_in_block, num_keys_in_block},
thread_id);
// no-op epilogue operator - just casting and storing contents of
// accum to global memory
typename MatmulDOIVJ::BiasGradEpilogue::OutputOp output_op({1, 1});
typename MatmulDOIVJ::BiasGradEpilogue epilogue(
shared_storage.gradB_epilogue(), thread_id, warp_id, lane_id);
epilogue(output_op, output_iter, accum, output_iter);
}
accum = accum * scale;
#if 0
PRINT_B0_T0("(doivj - di) * attn * scale");
print_warp_accum<LambdaIterator>(accum, lane_offset, 4, 4);
#endif
__syncthreads();
if (!MatmulGradK::DefaultMmaFromSmem::kIsTransposedA) {
auto tmpT = shared_storage.tmpT_shared_storage().accum_ref();
// attn <- attn_T.T
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
tmpT.at({accum_n, accum_m}) = scalar_t(accum[idx]);
},
[&](int accum_m) {});
}
}
MatmulDOIVJ::B2bGemm::accumToSmem(
shared_storage.tmp_shared_storage(),
accum,
lane_id,
output_tile_coords);
__syncthreads();
}
// Force `nvcc` to recompute values that depend on the variables just below
// to use less RF and prevent some spilling
p.head_dim = warp_uniform(p.head_dim);
p.k_strideM = warp_uniform(p.k_strideM);
rematerializeThreadIds();
/////////////////////////////////////////////////////////////////////////////////////////////////
// GradQ matmul
//
// grad_q[i_start:i_end] += tmp @ k_j
/////////////////////////////////////////////////////////////////////////////////////////////////
// Skip the loop & associated branches if we know at compile time the number
// of iterations
constexpr bool kSingleIterationGradQ =
kMaxK <= MatmulGradQ::ThreadblockShape::kN;
for (int col = 0; col < (kSingleIterationGradQ ? 1 : p.head_dim);
col += MatmulGradQ::ThreadblockShape::kN) {
using Mma = typename MatmulGradQ::Mma;
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
cutlass::gemm::GemmCoord problem_size(
num_queries_in_block,
false ? MatmulGradQ::ThreadblockShape::kN : p.head_dim - col,
num_keys_in_block);
// k_j
typename Mma::IteratorB iterator_B(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM + col,
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
auto a = shared_storage.tmp_shared_storage().accum_ref();
Mma mma(
// operand A: dSij
shared_storage.tmp_shared_storage().accum_ref(),
// operand B: Kj
shared_storage.mm_gradQ().operand_B_ref(),
thread_id,
warp_id,
lane_id);
typename Mma::FragmentC accum;
int col_id = col / MatmulGradQ::ThreadblockShape::kN;
int num_cols = kSingleIterationGradQ
? 1
: ceil_div(p.head_dim, MatmulGradQ::ThreadblockShape::kN);
int storage_id = (col_id + query_start / kBlockSizeI * num_cols);
if (p.num_splits_key_device() > 1) {
AtomicLock::acquire(
&p.workspace_gq[storage_id].lock,
p.split_key_device() + 1,
thread_id);
      // Make sure we can see the other blocks' output
__threadfence();
}
AccumTileGmem gmem_tile{&p.workspace_gq[storage_id].buffer[0]};
if (!kNeedsAccumGradQ ||
(p.num_splits_key_device() == 1 && key_start == 0)) {
// if we know we are the first to access it, we know it's only zeros.
// Avoids a load from gmem (and gmem init as well)
accum.clear();
} else {
gmem_tile.load(accum, thread_id);
}
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
__syncthreads();
mma.set_prologue_done(kPrologueGQ);
mma(gemm_k_iterations, accum, iterator_B, accum);
__syncthreads();
bool isLastColumn = kSingleIterationGradQ ||
(col + MatmulGradQ::ThreadblockShape::kN >= p.head_dim);
if (kPrologueGQ && !isLastColumn) {
prologueGradQ(col + MatmulGradQ::ThreadblockShape::kN);
}
bool isLast = [&]() {
int32_t next_key = key_start + p.num_splits_key_device() * kBlockSizeJ;
if (p.num_keys <= next_key) {
return true;
}
if (query_start < getSmallestQueryForKey(p, next_key)) {
return true;
}
return false;
}();
// Output results
if (p.num_splits_key_device() > 1) {
int32_t numAddsSoFar = -1;
if (isLast && thread_id == 0) {
numAddsSoFar = atomicAdd(&p.workspace_gq[storage_id].counter, 1) +
1; // `atomicAdd` returns the old value
}
isLast = __syncthreads_or(
numAddsSoFar == getNumParallelBlocksForQuery(p, query_start));
assert(numAddsSoFar <= getNumParallelBlocksForQuery(p, query_start));
}
if (kNeedsAccumGradQ && !isLast) {
gmem_tile.store(accum, thread_id);
if (p.num_splits_key_device() > 1) {
// Make sure everyone wrote before we release the lock
__threadfence();
__syncthreads();
AtomicLock::release(&p.workspace_gq[storage_id].lock, thread_id);
}
} else {
// NOTE: We're not releasing the lock because no one is expected
// to come after us (we're the last one to write)
typename MatmulGradQ::OutputTileIterator output_it(
typename MatmulGradQ::OutputTileIterator::Params{p.gQ_strideM()},
p.grad_query_ptr + query_start * p.gQ_strideM() + col,
{problem_size.m(), problem_size.n()},
thread_id);
bool storage_contains_zeros = kNeedsAccumGradQ || key_start == 0 ||
(p.num_splits_key_device() > 1);
accumulateInGmem<MatmulGradQ>(
isLastColumn ? shared_storage.gradQ_epilogue_lastIter()
: shared_storage.gradQ_epilogue(),
accum,
output_it,
storage_contains_zeros,
warp_id,
lane_id);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// GradK matmul
//
// grad_k[i_start:i_end] += tmp.transpose(-2, -1) @ q_i
/////////////////////////////////////////////////////////////////////////////////////////////////
rematerializeThreadIds();
constexpr bool kSingleIterationGradK =
kMaxK <= MatmulGradK::ThreadblockShape::kN;
for (int col = 0; col < (kSingleIterationGradK ? 1 : p.head_dim);
col += MatmulGradK::ThreadblockShape::kN) {
using Mma = typename MatmulGradK::Mma;
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
cutlass::gemm::GemmCoord problem_size(
num_keys_in_block,
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col,
num_queries_in_block);
auto createEpilogueIter = [&]() {
return typename MatmulGradK::OutputTileIterator(
typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()},
p.grad_key_ptr + key_start * p.gK_strideM() + col,
{num_keys_in_block,
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col},
thread_id);
};
// q_i
typename Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM + col,
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
auto getTmp = [&](int) { return &shared_storage.tmp_shared_storage(); };
auto getTmpT = [&](int) { return &shared_storage.tmpT_shared_storage(); };
// this is basically:
// opA = kIsTransposedA ? getTmp() : getTmpT();
bool constexpr kIsTransposedA =
MatmulGradK::DefaultMmaFromSmem::kIsTransposedA;
auto& opA = *call_conditional<
kIsTransposedA,
decltype(getTmp),
decltype(getTmpT)>::apply(getTmp, getTmpT, 0);
Mma mma(
// operand A: dSij.T
opA.accum_ref(),
// operand B: Qi
shared_storage.mm_gradK().operand_B_ref(),
thread_id,
warp_id,
lane_id);
int storage_id = col / MatmulGradK::ThreadblockShape::kN;
AccumTileGmem gmem_tile{
p.workspace + storage_id * AccumTileGmem::kElementsStored};
if (!kOutputInRF) {
if (isFirstQuery || !kNeedsAccumGradK) {
output_frags.gradK.clear();
} else {
gmem_tile.load(output_frags.gradK, thread_id);
}
}
mma.set_prologue_done(kPrologueGK);
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
__syncthreads();
mma(gemm_k_iterations,
output_frags.gradK,
iterator_B,
output_frags.gradK);
__syncthreads();
bool isLastColumn = kSingleIterationGradK ||
col + MatmulGradK::ThreadblockShape::kN >= p.head_dim;
if (kPrologueGK && !isLastColumn) {
prologueGradK(col + MatmulGradK::ThreadblockShape::kN);
}
if (kPrologueQK && isLastColumn) {
int32_t next_query, next_key;
incrIteration(p, query_start, key_start, next_query, next_key);
DISPATCH_BOOL(
next_key != key_start, kForceReloadK, ([&]() {
prologueQkNextIteration<kForceReloadK>(
shared_storage, p, next_query, next_key, warp_id, lane_id);
}));
}
// Output results
if (!kOutputInRF) {
if (kNeedsAccumGradK && !isLastQuery) {
gmem_tile.store(output_frags.gradK, thread_id);
} else {
accumulateInGmem<MatmulGradK>(
isLastColumn ? shared_storage.gradK_epilogue_final()
: shared_storage.gradK_epilogue(),
output_frags.gradK,
createEpilogueIter(),
isFirstQuery || kNeedsAccumGradK,
warp_id,
lane_id);
__syncthreads();
}
}
}
}
static CUTLASS_DEVICE int32_t getQueryStartShift(Params const& p) {
if (p.custom_mask_type == NoCustomMask && p.num_splits_key_device() > 1) {
return (p.split_key_device() * kBlockSizeI) % getQueryEnd(p);
}
return 0;
}
// Iteration order logic
static CUTLASS_DEVICE int32_t
getQueryStart(Params const& p, int32_t key_start) {
return getSmallestQueryForKey(p, key_start) + getQueryStartShift(p);
};
static CUTLASS_DEVICE int32_t getQueryEnd(Params const& p) {
return align_up(p.num_queries, kBlockSizeI);
};
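  // Returns the first (block-aligned) query that can attend to keys starting
  // at `key_start`. Example (illustrative numbers): with kBlockSizeI == 64 and
  // CausalFromTopLeft, key_start == 130 maps to query block 128.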
static CUTLASS_DEVICE int32_t
getSmallestQueryForKey(Params const& p, int32_t key_start) {
if (p.custom_mask_type == CausalFromTopLeft) {
return (key_start / kBlockSizeI) * kBlockSizeI;
} else if (p.custom_mask_type == CausalFromBottomRight) {
int first_query =
cutlass::fast_max(0, key_start - p.num_keys + p.num_queries);
return (first_query / kBlockSizeI) * kBlockSizeI;
}
return 0;
};
  // Returns how many kernel blocks will write to a given block in `grad_query`.
  // This is usually equal to the number of key splits, but can be smaller,
  // for instance in the causal case or with varying sequence lengths.
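  // Example (illustrative numbers): kBlockSizeI == kBlockSizeJ == 64,
  // num_keys == 512, num_splits_key == 8, CausalFromTopLeft, query_start == 128:
  // last_key_for_block == 191, num_key_blocks == ceil(191 / 64) == 3, so only
  // 3 of the 8 splits contribute to this grad_query block.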
static CUTLASS_DEVICE int32_t
getNumParallelBlocksForQuery(Params const& p, int32_t query_start) {
int16_t num_key_blocks = ceil_div(p.num_keys, kBlockSizeJ);
if (p.custom_mask_type == CausalFromTopLeft) {
int32_t last_key_for_block = query_start + kBlockSizeI - 1;
last_key_for_block = cutlass::fast_min(last_key_for_block, p.num_keys);
num_key_blocks = ceil_div(last_key_for_block, kBlockSizeJ);
} else if (p.custom_mask_type == CausalFromBottomRight) {
int32_t last_key_for_block =
query_start + (kBlockSizeI - 1) + (1 + p.num_keys - p.num_queries);
last_key_for_block = cutlass::fast_min(last_key_for_block, p.num_keys);
num_key_blocks = ceil_div(last_key_for_block, kBlockSizeJ);
}
return cutlass::fast_min(p.num_splits_key_device(), num_key_blocks);
};
// Returns the next block to process
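  // Each split walks the query blocks of its current key block in increasing
  // order (optionally rotated by getQueryStartShift() so that splits start on
  // different query blocks), then advances to its next key block at
  // key_start + num_splits_key * kBlockSizeJ.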
static CUTLASS_DEVICE void incrIteration(
Params const& p,
int32_t query_start,
int32_t key_start,
int32_t& next_query,
int32_t& next_key) {
next_query = query_start + kBlockSizeI;
next_key = key_start;
auto query_shift = getQueryStartShift(p);
// Wrap around
if (query_shift) {
if (next_query >= p.num_queries) {
next_query = getSmallestQueryForKey(p, key_start);
return;
} else if (query_start < query_shift && query_shift <= next_query) {
// jump to next key
} else {
return;
}
} else {
if (next_query < p.num_queries) {
return;
}
// jump to next key
}
// Next key
next_key = key_start + p.num_splits_key_device() * kBlockSizeJ;
next_query = getQueryStart(p, next_key);
}
template <bool kForceReloadK>
static CUTLASS_DEVICE void prologueQkNextIteration(
SharedStorage& shared_storage,
Params const& p,
int32_t query_start,
int32_t key_start,
uint8_t warp_id,
uint8_t lane_id) {
if (query_start >= p.num_queries || key_start >= p.num_keys) {
return;
}
static constexpr bool kReloadK =
kForceReloadK || !MatmulQK::Mma::kSmemContainsEntireMat;
int thread_id = 32 * warp_id + lane_id;
typename MatmulQK::Mma::IteratorA iterator_A(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM,
{p.num_keys - key_start, p.head_dim},
thread_id,
cutlass::MatrixCoord{0, 0});
typename MatmulQK::Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM,
{p.head_dim, p.num_queries - query_start},
thread_id,
cutlass::MatrixCoord{0, 0});
MatmulQK::Mma::prologue<kReloadK, true>(
shared_storage.mm_qk_k(),
shared_storage.mm_qk_q(),
iterator_A,
iterator_B,
thread_id,
p.head_dim);
}
template <bool skipBoundsChecks>
static CUTLASS_DEVICE void writeFragsToGmem(
SharedStorage& shared_storage,
OutputFragments& output_frags,
Params const& p,
int32_t key_start,
uint8_t warp_id,
uint8_t lane_id) {
uint16_t thread_id = 32 * warp_id + lane_id;
int32_t num_keys_in_block = skipBoundsChecks
? MatmulQK::Mma::Shape::kM
: cutlass::fast_min(
(int32_t)MatmulQK::Mma::Shape::kM, p.num_keys - key_start);
typename MatmulGradV::OutputTileIterator outputV_it(
typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()},
p.grad_value_ptr + key_start * p.gV_strideM(),
{num_keys_in_block, p.head_dim_value},
thread_id);
accumulateInGmem<MatmulGradV>(
shared_storage.gradV_epilogue_final(),
output_frags.gradV,
outputV_it,
true,
warp_id,
lane_id);
typename MatmulGradK::OutputTileIterator outputK_it(
typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()},
p.grad_key_ptr + key_start * p.gK_strideM(),
{num_keys_in_block,
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim},
thread_id);
accumulateInGmem<MatmulGradK>(
shared_storage.gradK_epilogue_final(),
output_frags.gradK,
outputK_it,
true,
warp_id,
lane_id);
}
template <typename MatmulT>
static CUTLASS_DEVICE void accumulateInGmem(
typename MatmulT::DefaultEpilogue::SharedStorage& epilogue_smem,
typename MatmulT::Mma::FragmentC const& accum,
typename MatmulT::OutputTileIterator output_it,
bool first,
uint8_t warp_id,
uint8_t lane_id) {
using DefaultEpilogue = typename MatmulT::DefaultEpilogue;
using DefaultOutputOp = typename MatmulT::DefaultOutputOp;
using Mma = typename MatmulT::Mma;
int thread_id = 32 * warp_id + lane_id;
DISPATCH_BOOL(
first, kIsFirst, ([&]() {
static constexpr auto ScaleType = kIsFirst
? cutlass::epilogue::thread::ScaleType::Nothing
: cutlass::epilogue::thread::ScaleType::NoBetaScaling;
using EpilogueOutputOp =
typename cutlass::epilogue::thread::LinearCombination<
typename DefaultOutputOp::ElementOutput,
DefaultOutputOp::kCount,
typename DefaultOutputOp::ElementAccumulator,
typename DefaultOutputOp::ElementCompute,
ScaleType>;
using Epilogue =
typename cutlass::epilogue::threadblock::EpiloguePipelined<
typename DefaultEpilogue::Shape,
typename Mma::Operator,
DefaultEpilogue::kPartitionsK,
typename MatmulT::OutputTileIterator,
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::SharedLoadIterator,
EpilogueOutputOp,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration,
true // IterationsUnroll
>;
EpilogueOutputOp rescale({1, 1});
Epilogue epilogue(epilogue_smem, thread_id, warp_id, lane_id);
epilogue(rescale, output_it, accum, output_it);
}));
}
template <int kElementsPerAccess>
static CUTLASS_DEVICE void computeDelta(
Params const& p,
int32_t query_start,
uint8_t warp_id,
uint8_t lane_id) {
// Each thread computes one value for Delta
// Depending on warp configuration, we might have multiple
// threads of the same warp working on the same row
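    // Delta[q] = sum_d grad_output[q][d] * output[q][d], i.e. the rowsum of
    // dO * O used as `Di` in the softmax backward above.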
using AccessType = cutlass::Array<scalar_t, kElementsPerAccess>;
static_assert(kNumThreads >= kBlockSizeI, "");
static constexpr int kNumThreadsPerLine = kNumThreads / kBlockSizeI;
int16_t thread_id = 32 * warp_id + lane_id;
int16_t laneFirstCol = kElementsPerAccess * (lane_id % kNumThreadsPerLine);
int16_t laneRow = thread_id / kNumThreadsPerLine;
bool rowPred = (query_start + laneRow) < p.num_queries;
bool pred = rowPred;
    // On Windows, the previous syntax `__restrict__ AccessType*`
    // resulted in the error: "restrict" is not allowed
const AccessType* __restrict__ grad_output_ptr =
reinterpret_cast<const AccessType*>(
p.grad_output_ptr + (query_start + laneRow) * p.gO_strideM +
laneFirstCol);
const AccessType* __restrict__ output_ptr =
reinterpret_cast<const AccessType*>(
p.output_ptr + (query_start + laneRow) * p.o_strideM() +
laneFirstCol);
static constexpr int64_t kMaxIters =
kMaxK / (kElementsPerAccess * kNumThreadsPerLine);
constexpr int kPipelineStages = 2;
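    // Two-stage software pipeline: the global loads for iteration i+1 are
    // issued before the products of iteration i are accumulated, so memory
    // latency overlaps with the FMAs.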
accum_t delta_value = accum_t(0);
using GlobalLoad =
cutlass::arch::global_load<AccessType, sizeof(AccessType)>;
AccessType frag_grad_output[kPipelineStages];
AccessType frag_output[kPipelineStages];
auto loadAndIncrement = [&](int ld_pos, bool is_valid) {
frag_grad_output[ld_pos].clear();
frag_output[ld_pos].clear();
GlobalLoad(frag_grad_output[ld_pos], grad_output_ptr, is_valid);
GlobalLoad(frag_output[ld_pos], output_ptr, is_valid);
grad_output_ptr += kNumThreadsPerLine;
output_ptr += kNumThreadsPerLine;
};
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kPipelineStages - 1; ++iter) {
int ld_pos = iter % kPipelineStages;
pred = pred &&
(laneFirstCol + iter * kElementsPerAccess * kNumThreadsPerLine) <
p.head_dim_value;
loadAndIncrement(ld_pos, pred);
}
auto columnIteration = [&](int iter) {
// Load for next iter
int ld_pos = (iter + kPipelineStages - 1) % kPipelineStages;
pred = pred &&
(laneFirstCol +
(iter + kPipelineStages - 1) * kElementsPerAccess *
kNumThreadsPerLine) < p.head_dim_value;
loadAndIncrement(ld_pos, pred);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessType::kElements; ++i) {
delta_value += accum_t(frag_output[iter % kPipelineStages][i]) *
accum_t(frag_grad_output[iter % kPipelineStages][i]);
}
};
    // If we have a small compile-time upper bound for K, we can fully unroll the loop
if (kMaxK <= 256) {
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kMaxIters; ++iter) {
columnIteration(iter);
}
} else {
int num_iters =
ceil_div(p.head_dim_value, kElementsPerAccess * kNumThreadsPerLine) *
(kElementsPerAccess * kNumThreadsPerLine);
for (int iter = 0; iter < num_iters; ++iter) {
columnIteration(iter);
}
}
// Reduce between workers
static_assert(
kNumThreadsPerLine == 1 || kNumThreadsPerLine == 2 ||
kNumThreadsPerLine == 4,
"");
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kNumThreadsPerLine; i *= 2) {
delta_value = delta_value + __shfl_xor_sync(0xffffffff, delta_value, i);
}
// Store in gmem
if (rowPred) {
p.delta_ptr[query_start + laneRow] = delta_value;
}
}
};
template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
attention_kernel_backward_batched_impl(typename AK::Params p) {
if (!p.advance_to_block()) {
return;
}
AK::attention_kernel(p);
}
template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
attention_kernel_backward_batched(typename AK::Params params);
| examples/41_fused_multi_head_attention/kernel_backward.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/kernel_backward.h",
"repo_id": "examples",
"token_count": 44873
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Performs a dual gemm in one fused kernel:
```
D0 = epilogue0(X @ B0, C0)
D1 = epilogue1(X @ B1, C1)
D2 = element_wise(D0, D1)
```
*/
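//
// Illustrative usage sketch (not part of the original file): the template
// arguments and epilogue types below are placeholders chosen to show the
// parameter order of the DualGemm device class defined in this header,
// e.g. for a SwiGLU-style D2 = silu(X @ B0 + C0) * (X @ B1 + C1).
//
//   using DualGemm = cutlass::gemm::device::DualGemm<
//       cutlass::half_t, cutlass::layout::RowMajor,     // ElementA, LayoutA
//       cutlass::half_t, cutlass::layout::ColumnMajor,  // ElementB, LayoutB0
//       cutlass::layout::ColumnMajor,                   // LayoutB1
//       cutlass::half_t, cutlass::layout::RowMajor,     // ElementC, LayoutC
//       float,                                          // ElementAccumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 64, 32>,          // threadblock tile
//       cutlass::gemm::GemmShape<64, 32, 32>,           // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,            // instruction tile
//       EpilogueOutputOp0, EpilogueOutputOp1, EpilogueOutputOp2>;  // user-defined
//
//   DualGemm dual_gemm_op;
//   dual_gemm_op({mode, problem_size,
//                 ref_A0, ref_B0, ref_C0, ref_D0,
//                 ref_B1, ref_C1, ref_D1, ref_D2},
//                workspace_ptr, stream);
//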
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "../kernel/dual_gemm.h"
#include "../dual_gemm_common.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B0 matrix operand
typename LayoutB0_,
/// Layout type for B1 matrix operand
typename LayoutB1_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp0_,
typename EpilogueOutputOp1_,
typename EpilogueOutputOp2_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
bool StoreD0 = true,
bool StoreD1 = true,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class DualGemm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB0 = LayoutB0_;
using LayoutB1 = LayoutB1_;
using TensorRefB0 = TensorRef<ElementB const, LayoutB0>;
using TensorRefB1 = TensorRef<ElementB const, LayoutB1>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp0 = EpilogueOutputOp0_;
using EpilogueOutputOp1 = EpilogueOutputOp1_;
using EpilogueOutputOp2 = EpilogueOutputOp2_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp1::kCount;
static bool const kSplitKSerial = SplitKSerial;
static bool constexpr kStoreD0 = StoreD0;
static bool constexpr kStoreD1 = StoreD1;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
using LayoutScaleBias = layout::RowMajor;
/// Define the kernel
/// Define the threadblock-scoped matrix multiply-accumulate
static_assert(ArchTag::kMinComputeCapability >= 80, "Only multistage is implemented");
static_assert(kStages >= 3, "Only multistage is implemented");
using Mma0 = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB0, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, ArchTag,
ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator>::ThreadblockMma;
using Mma1 = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB1, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, ArchTag,
ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator>::ThreadblockMma;
using DualMma = threadblock::DualMmaMultistage<
typename Mma0::Shape,
typename Mma0::IteratorA,
typename Mma0::SmemIteratorA,
Mma0::kCacheOpA,
typename Mma0::IteratorB,
typename Mma0::SmemIteratorB,
Mma0::kCacheOpB,
typename Mma1::IteratorB,
typename Mma1::SmemIteratorB,
typename Mma0::ElementC,
typename Mma0::LayoutC,
typename Mma0::Policy,
typename Mma1::Policy,
Mma0::kStages,
SharedMemoryClearOption::kNone
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue0 =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename DualMma::Operator0, kPartitionsK, EpilogueOutputOp0,
EpilogueOutputOp0::kCount>::Epilogue;
using Epilogue1 =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename DualMma::Operator1, kPartitionsK, EpilogueOutputOp1,
EpilogueOutputOp1::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using DualGemmKernel = kernel::DualGemm<
DualMma,
Epilogue0, Epilogue1, EpilogueOutputOp2,
ThreadblockSwizzle, kSplitKSerial,
kStoreD0, kStoreD1>;
/// Argument structure
struct Arguments {
//
// Data members
//
DualGemmMode mode;
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A0;
TensorRef<ElementB const, LayoutB0> ref_B0;
TensorRef<ElementC const, LayoutC> ref_C0;
TensorRef<ElementC, LayoutC> ref_D0;
TensorRef<ElementB const, LayoutB1> ref_B1;
TensorRef<ElementC const, LayoutC> ref_C1;
TensorRef<ElementC, LayoutC> ref_D1;
TensorRef<ElementC, LayoutC> ref_D2;
typename EpilogueOutputOp0::Params epilogue0;
typename EpilogueOutputOp1::Params epilogue1;
typename EpilogueOutputOp2::Params epilogue2;
int split_k_slices;
int batch_count;
int64_t batch_stride_A;
int64_t batch_stride_B0;
int64_t batch_stride_B1;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
DualGemmMode mode,
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A0_,
TensorRef<ElementB const, LayoutB0> ref_B0_,
TensorRef<ElementC const, LayoutC> ref_C0_,
TensorRef<ElementC, LayoutC> ref_D0_,
TensorRef<ElementB const, LayoutB1> ref_B1_,
TensorRef<ElementC const, LayoutC> ref_C1_,
TensorRef<ElementC, LayoutC> ref_D1_,
TensorRef<ElementC, LayoutC> ref_D2_,
typename EpilogueOutputOp0::Params epilogue0_ =
typename EpilogueOutputOp0::Params(),
typename EpilogueOutputOp1::Params epilogue1_ =
typename EpilogueOutputOp1::Params(),
typename EpilogueOutputOp2::Params epilogue2_ =
typename EpilogueOutputOp2::Params(),
int split_k_slices_ = 1,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B0 = 0,
int64_t batch_stride_B1 = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0
):
mode(mode),
problem_size(problem_size_),
ref_A0(ref_A0_),
ref_B0(ref_B0_),
ref_C0(ref_C0_),
ref_D0(ref_D0_),
ref_B1(ref_B1_),
ref_C1(ref_C1_),
ref_D1(ref_D1_),
ref_D2(ref_D2_),
epilogue0(epilogue0_),
epilogue1(epilogue1_),
epilogue2(epilogue2_),
split_k_slices(split_k_slices_),
batch_count(batch_count),
batch_stride_A(batch_stride_A),
batch_stride_B0(batch_stride_B0),
batch_stride_B1(batch_stride_B1),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D) {
}
};
private:
/// Kernel parameters object
typename DualGemmKernel::Params params_;
public:
/// Constructs the GEMM.
DualGemm() = default;
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (args.mode == DualGemmMode::kBatched && kSplitKSerial) {
return Status::kErrorInvalidProblem;
}
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
if (kStoreD0 != (args.ref_D0.data() != nullptr)) {
return Status::kErrorInternal;
}
if (kStoreD1 != (args.ref_D1.data() != nullptr)) {
return Status::kErrorInternal;
}
Status status = DualGemmKernel::can_implement(
args.problem_size,
args.ref_A0.non_const_ref(),
args.ref_B0.non_const_ref(),
args.ref_C0.non_const_ref(),
args.ref_D0,
args.ref_B1.non_const_ref(),
args.ref_C1.non_const_ref(),
args.ref_D1,
args.ref_D2
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
if (kSplitKSerial && args.split_k_slices > 1) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.mode == DualGemmMode::kBatched ? args.batch_count : args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename DualGemmKernel::Params{
args.mode,
args.problem_size,
grid_shape,
args.ref_A0.non_const_ref(),
args.ref_B0.non_const_ref(),
args.ref_C0.non_const_ref(),
args.ref_D0,
args.ref_B1.non_const_ref(),
args.ref_C1.non_const_ref(),
args.ref_D1,
args.ref_D2,
args.epilogue0,
args.epilogue1,
args.epilogue2,
reinterpret_cast<int *>(workspace),
args.batch_stride_A,
args.batch_stride_B0,
args.batch_stride_B1,
args.batch_stride_C,
args.batch_stride_D,
};
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A0.reset(args.ref_A0.non_const_ref().data());
params_.ref_B0.reset(args.ref_B0.non_const_ref().data());
params_.ref_C0.reset(args.ref_C0.non_const_ref().data());
params_.ref_D0.reset(args.ref_D0.data());
params_.ref_B1.reset(args.ref_B1.non_const_ref().data());
params_.ref_C1.reset(args.ref_C1.non_const_ref().data());
params_.ref_D1.reset(args.ref_D1.data());
params_.ref_D2.reset(args.ref_D2.data());
params_.output_op_0 = args.epilogue0;
params_.output_op_1 = args.epilogue1;
params_.output_op_2 = args.epilogue2;
params_.semaphore = reinterpret_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(DualGemmKernel::kThreadCount, 1, 1);
cudaError_t result;
int smem_size = int(sizeof(typename DualGemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(Kernel<DualGemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<DualGemmKernel><<<grid, block, smem_size, stream>>>(params_);
result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/device/dual_gemm.h/0 | {
"file_path": "examples/45_dual_gemm/device/dual_gemm.h",
"repo_id": "examples",
"token_count": 6633
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Additional permutation information for the example.
*/
#include "cutlass/layout/permute.h"
#include "cutlass/gemm/gemm.h"
namespace example
{
using namespace cute;
// This struct is specialized below for different CUTLASS 2.x permutation ops
// to describe the operation in terms of target CuTe shape and stride order.
template<class Permute>
struct PermuteTraits {};
// Use X as a placeholder for shape division result
using X = Underscore;
// Reshape a rank-2 shape into a multidimensional shape.
// Input:
// shape = (A, B, ...)
// target_shape = ((A1, ..., X, ..., Am), (B1, ..., X, ..., Bn), ...)
// Output:
// ((A1, ..., A/prod(A1..Am), ..., Am), (B1, ..., B/prod(B1..Bn), ..., Bn), ...)
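// Example (illustrative): reshape((12, 8), ((_2, X), (X))) yields ((_2, 6), (8))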
template<class Shape, class TargetShape>
constexpr auto
reshape(Shape const& shape, TargetShape const& target_shape)
{
if constexpr (is_tuple<Shape>::value) {
return cute::transform(shape, target_shape, [](auto && s, auto && t){ return reshape(s, t); });
}
else {
auto idx = find_if(target_shape, [](auto x){ return is_underscore<decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
static_assert(I < tuple_size_v<TargetShape>, "Each mode of TargetShape must contain a placeholder X");
auto divisors = remove<I>(target_shape);
assert(shape % product(divisors) == 0);
return replace<I>(target_shape, shape / product(divisors));
}
}
// Given a tensor layout, compute a permutation layout consisting of:
// - sub-modes corresponding to the implied multidimensional shape of the source tensor
// - strides accounting for the permutation operation being performed
template<class Permute, bool Transpose, class Shape, class Stride>
constexpr auto
make_permute_layout(Layout<Shape,Stride> const& layout) {
static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported");
if constexpr (Transpose) {
// Deal with tensor B by transposing appropriately before and after computing the permute layout.
// Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch].
return select<1,0,2>(make_permute_layout<Permute, false>(select<1,0,2>(layout)));
}
else {
if constexpr (cutlass::layout::is_trivial_permute<Permute>) {
// Special case for NoPermute. Use a depth-2 layout for consistency with other permutations.
using ShapeProfile = tuple<tuple<X>, tuple<X>, tuple<X>>;
return unflatten(layout, ShapeProfile{});
}
else {
// Here's where the permutation layout is actually built
using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile;
using StrideOrder = typename PermuteTraits<Permute>::StrideOrder;
return make_ordered_layout(reshape(layout.shape(), ShapeProfile{}), StrideOrder{});
}
}
}
namespace detail
{
template<int I>
struct is_constant_pred {
template <class T>
constexpr auto operator()(T) {
return is_constant<I, T>{};
}
};
template<class Permutation, int... I>
constexpr auto
inverse_impl(Permutation const & perm, seq<I...>) {
return cute::make_tuple(Int<find_if(Permutation{}, is_constant_pred<I>{})>{}...);
}
} // namespace detail
// Compute an inverse of a permutation represented as a tuple of cute::Int<>
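// Example (illustrative): inverse(cute::make_tuple(_1{}, _2{}, _0{})) == (_2, _0, _1)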
template<class Permutation>
constexpr auto
inverse(Permutation const & perm) {
auto flat_perm = flatten(perm);
return unflatten(detail::inverse_impl(flat_perm, tuple_seq<decltype(flat_perm)>{}), perm);
}
template<class T>
using inverse_t = decltype(inverse(T{}));
// Given a rank-2 layout of tensor that is assumed to have been permuted,
// compute the original rank-2 layout of the tensor prior to the permutation.
// This is needed to form the correct input to the standalone permutation kernel.
template<class Permute, bool Transpose, class Shape, class Stride>
constexpr auto
make_original_layout(Layout<Shape,Stride> const& layout) {
static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported");
if constexpr (Transpose) {
// Deal with tensor B by transposing appropriately before and after computing the permute layout.
// Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch].
return select<1,0,2>(make_original_layout<Permute, false>(select<1,0,2>(layout)));
}
else {
using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile;
using IndexOrder = typename PermuteTraits<Permute>::IndexOrder;
using OrigOrder = conditional_t<cutlass::gemm::detail::is_major<0,Stride>(), seq<0,1,2>, seq<1,0,2>>;
auto orig_shape = select(flatten(reshape(layout.shape(), ShapeProfile{})), IndexOrder{});
// print("Permuted shape: "); print(reshape(layout.shape(), ShapeProfile{})); print("\n");
// print("Original shape: "); print(orig_shape); print("\n");
return make_ordered_layout(product_each(orig_shape), OrigOrder{});
}
}
/////////////// Tensor4DPermute0213 ////////////////////
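// Viewing the matrix as a rank-4 tensor [D0, D1, D2, D3], this op swaps the
// two middle modes (hence "0213"): output[d0][d2][d1][d3] = input[d0][d1][d2][d3].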
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,X>, Shape<X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
};
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<Int<D1>,X>, Shape<X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
};
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D2>>, Shape<X>>;
using IndexOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
using StrideOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
};
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<Int<D2>,X>, Shape<X,Int<D1>>, Shape<X>>;
using IndexOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
using StrideOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
};
/////////////// Tensor4DPermuteBMM0321 ////////////////////
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_1>, Step<_3>>;
using StrideOrder = Step<Step<_0>, Step<_2>, Step<_1,_3>>;
};
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X,Int<D>>, Shape<X>, Shape<X>>;
using IndexOrder = Step<Step<_0>, Step<_2>, Step<_1,_3>>;
using StrideOrder = Step<Step<_0,_2>, Step<_1>, Step<_3>>;
};
/////////////// Tensor4DPermuteBMM0213 ////////////////////
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>;
using IndexOrder = Step<Step<_0>, Step<_1,_2>, Step<_3>>;
using StrideOrder = Step<Step<_2>, Step<_0>, Step<_1,_3>>;
};
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X>, Shape<X,Int<D>>, Shape<X>>;
using IndexOrder = Step<Step<_0>, Step<_1>, Step<_2,_3>>;
using StrideOrder = Step<Step<_1>, Step<_0,_2>, Step<_3>>;
};
/////////////// Tensor5DPermute02413 ////////////////////
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajor<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,Int<D3>,X>, Shape<X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>;
};
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<X,Int<D1>,Int<D3>>, Shape<X>>;
using IndexOrder = Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>;
};
/////////////// Tensor5DPermute20314 ////////////////////
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajor<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D3>,Int<D2>>, Shape<X>>;
using IndexOrder = Step<Step<_2,_0>, Step<_3,_1,_4>, Step<_5>>;
using StrideOrder = Step<Step<_1,_3>, Step<_0,_2,_4>, Step<_5>>;
};
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajorInverse<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<X,Int<D1>,Int<D3>>, Shape<X>>;
using IndexOrder = Step<Step<_3,_0>, Step<_2,_4,_1>, Step<_5>>;
using StrideOrder = Step<Step<_4,_2>, Step<_0,_3,_1>, Step<_5>>;
};
} // namespace example
| examples/53_hopper_gemm_permute/permute_traits.hpp/0 | {
"file_path": "examples/53_hopper_gemm_permute/permute_traits.hpp",
"repo_id": "examples",
"token_count": 4116
} | 15 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/atom/copy_atom.hpp"
#include <random>
#include "cutlass/util/print_error.hpp"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_mma.hpp"
using namespace cute;
struct AmpereUnpredicatedFprop {
//
// Static config for conv problem shape
//
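  // Conv dims follow the usual CUTLASS naming: (D, H, W) input depth/height/width,
  // (T, R, S) filter extents, (Z, P, Q) output extents, C input channels,
  // K output channels (number of filters).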
using D = _6;
using H = _4;
using W = _4;
using T = _3;
using R = _3;
using S = _3;
using Z = _4;
using P = _2;
using Q = _2;
using C = _64;
using K = _128;
// Tiler config
using Tiler_K = decltype(cute::min(K{}, _128{}));
using Tiler_C = decltype(cute::min(C{}, _32{}));
using Tiler_N = _4;
using TileM = Tiler_K;
using TileN = Shape<Tiler_N, Z, P, Q>;
using TileK = Shape<Tiler_C,_1,_1,_1>;
using PIPE = _3;
using TilerFlt = Shape<TileM, TileK>;
using TilerAct = Shape<TileN, TileK>;
using TilerOut = Shape<TileM, TileN>;
using TileSizeM = Int<size(TileM{})>;
using TileSizeN = Int<size(TileN{})>;
using TileSizeK = Int<size(TileK{})>;
static constexpr int Stages = PIPE::value;
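  // Implicit-GEMM view of fprop: GEMM M = K (output channels),
  // GEMM N = (N, Z, P, Q) flattened output pixels, GEMM K = (C, T, R, S) reduction,
  // tiled as TileSizeM x TileSizeN x TileSizeK with a PIPE-stage smem pipeline.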
using ElementFlt = tfloat32_t;
using ElementAct = tfloat32_t;
using ElementOut = float;
using TiledMma = TiledMMA<
MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>,
Layout<Shape<_2,_2,_1>>,
Tile<_32,_32,Underscore>>;
static constexpr int MaxThreadsPerBlock = size(TiledMma{});
static constexpr int MinBlocksPerMultiprocessor = 1;
union SharedStorage {
struct {
ElementFlt sAMatrix[size(TileM{}) * size(TileK{}) * size(PIPE{})];
ElementAct sBMatrix[size(TileN{}) * size(TileK{}) * size(PIPE{})];
} mainloop;
struct {
ElementOut sCMatrix[size(TileM{}) * size(TileN{})];
} epilogue;
};
//
// Stencil tensor
//
using GmemLayoutFlt = decltype(make_ordered_layout(
Shape< K, Shape< C, T, R, S>>{},
tuple<_4, tuple<_0,_3,_2,_1>>{}));
// We have 64 elements * 32b each in the major mode that we can vectorize
// Max vector size is 128b, so lay 16 threads along the major mode with a vector size of 4
// Rest along the minor mode
using GmemTiledCopyFlt = decltype(make_tiled_copy(
Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, ElementFlt>{},
Layout<Shape <_16, _8>,
Stride< _8, _1>>{},
Layout<Shape < _1, _4>>{}));
  // The following layout is also correct, but incurs dynamic strides in the slice in exchange for bank-conflict-free accesses
// using SmemLayoutFlt = decltype(
// composition(Swizzle<3,2,3>{},
// make_ordered_layout(
// Shape<TileSizeM,TileSizeK,PIPE>{},
// tuple< _1, _0, _2>{})));
using SmemLayoutAtomFlt = decltype(
composition(Swizzle<1,2,3>{},
Layout<Shape <_8,Shape <_4, _2>>,
Stride<_4,Stride<_1,_32>>>{}));
using SmemCopyAtomFlt = Copy_Atom<SM75_U32x4_LDSM_N, ElementFlt>;
//
// Activation tensor
//
// Activation tensor is major in the contraction mode, so vectorize that mode first
// Then lay out the rest of the threads along the other mode
using GmemTiledCopyAct = decltype(make_tiled_copy(
Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, ElementAct>{},
Layout<Shape <_16, _8>,
Stride< _8, _1>>{},
Layout<Shape < _1, _4>>{}));
  // The following layout is also correct, but incurs dynamic strides in the slice in exchange for bank-conflict-free accesses
// using SmemLayoutAct = decltype(
// composition(Swizzle<3,2,3>{},
// make_ordered_layout(
// Shape<TileSizeN,TileSizeK,PIPE>{},
// tuple< _1, _0, _2>{})));
using SmemLayoutAtomAct = decltype(
composition(Swizzle<1,2,3>{},
Layout<Shape <_8,Shape <_4, _2>>,
Stride<_4,Stride<_1,_32>>>{}));
using SmemCopyAtomAct = Copy_Atom<SM75_U32x4_LDSM_N, ElementAct>;
//
// Output tensor
//
using GmemTiledCopyOut = decltype(make_tiled_copy(
Copy_Atom<UniversalCopy<uint128_t>, ElementOut>{},
Layout<Shape <_8, _16>,
Stride<_1, _8>>{},
Layout<Shape <_4, _1>>{}));
using SmemCopyAtomOut = Copy_Atom<UniversalCopy<uint32_t>, ElementOut>;
// This can be optimized to make accesses BCF, but we use a col-major layout here to show off composability
using SmemLayoutOut = Layout<Shape<TileSizeM, TileSizeN>>;
//
// Conv functor
//
template <class EngineFlt, class TensorActivation, class TensorOutput>
void __device__
operator()(cute::Tensor<EngineFlt, GmemLayoutFlt> mFlt, // ( K, (C,T,R,S))
TensorActivation mAct, // ((N,Z,P,Q), (C,T,R,S))
TensorOutput mOut, // ( K, (N,Z,P,Q))
char* smem_buf) const {
using namespace cute;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveMma<
cutlass::gemm::MainloopSm80CpAsyncUnpredicated<PIPE::value>,
Shape<TileM,TileN,TileK>,
ElementFlt,
Underscore, // Ignore the stride, we are passing full cute::Tensor to operator()
ElementAct,
Underscore, // Ignore the stride, we are passing full cute::Tensor to operator()
TiledMma,
GmemTiledCopyFlt,
SmemLayoutAtomFlt,
SmemCopyAtomFlt,
cute::identity,
GmemTiledCopyAct,
SmemLayoutAtomAct,
SmemCopyAtomAct,
cute::identity>;
TiledMma tiled_mma;
Tensor accum = partition_fragment_C(tiled_mma, TilerOut{});
clear(accum);
// Set up tensors
// NOTE: blockIdx.x projects onto act-NDHW mode, y along the flt-K mode for the sake of higher dynamic range in NDHW
Tensor gA_mk = local_tile(mFlt, TilerFlt{}, make_coord(_,_)); // (BLK_M,BLK_K,m',k')
Tensor gB_nk = local_tile(mAct, TilerAct{}, make_coord(_,_)); // (BLK_N,BLK_K,n',_1)
Tensor gC_mn = local_tile(mOut, TilerOut{}, make_coord(_,_)); // (BLK_M,BLK_N,m',n')
// Compute m_coord and n_coord with their post-tiled shapes
auto m_coord = idx2crd(int(blockIdx.y), shape<2>(gA_mk));
auto n_coord = idx2crd(int(blockIdx.x), shape<2>(gB_nk));
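// Editorial sketch (assumption; the launch code is not part of this header): the
// grid is expected to be sized to match these tiled shapes, e.g.
//   dim3 grid(size<2>(gB_nk), size<2>(gA_mk), 1);  // x: NZPQ tiles, y: K tiles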
Tensor gA = gA_mk(_,_,m_coord,_); // (BLK_M,BLK_K,k')
Tensor gB = gB_nk(_,_,n_coord,_); // (BLK_N,BLK_K,_1)
Tensor gC = gC_mn(_,_,m_coord,n_coord); // (BLK_M,BLK_N)
auto k_tile_iter = cute::make_coord_iterator(size<2>(gA));
int k_tile_count = size<2>(gA);
CollectiveMainloop collective_mma;
collective_mma(
accum,
gA,
gB,
accum,
k_tile_iter, k_tile_count,
Underscore{}, // no residue since we do not support predication
threadIdx.x,
smem_buf);
//
// Epilogue
//
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sC = make_tensor(make_smem_ptr(&storage.epilogue.sCMatrix[0]), SmemLayoutOut{});
auto smem_tiled_copy_C = make_tiled_copy_C(SmemCopyAtomOut{}, tiled_mma);
auto smem_thr_copy_C = smem_tiled_copy_C.get_slice(threadIdx.x);
auto tCrC = smem_thr_copy_C.retile_S(accum);
auto tCsC = smem_thr_copy_C.partition_D(sC);
copy(smem_tiled_copy_C, tCrC, tCsC);
__syncthreads();
GmemTiledCopyOut gmem_tiled_copy_C;
auto gmem_thr_copy_C = gmem_tiled_copy_C.get_slice(threadIdx.x);
auto tDsC = gmem_thr_copy_C.partition_S(sC);
auto tDgC = gmem_thr_copy_C.partition_D(gC);
copy(gmem_tiled_copy_C, tDsC, tDgC);
#if 0 // Stale debug printing: several of the names below (sA, sB, tAgA, tAsA, ...) are internal to the collective mainloop and are not in scope here.
if (thread0()) {
print("mAct = "); print(mAct); print('\n');
print("mFlt = "); print(mFlt); print('\n');
print("mOut = "); print(mOut); print('\n');
print("gA = "); print(gA); print('\n');
print("gB = "); print(gB); print('\n');
print("gC = "); print(gC); print('\n');
print("sA = "); print(sA.layout()); print('\n');
print("sB = "); print(sB.layout()); print('\n');
print("sC = "); print(sC.layout()); print('\n');
print("tAgA = "); print(tAgA.layout()); print('\n');
print("tBgB = "); print(tBgB.layout()); print('\n');
print("tAsA = "); print(tAsA.layout()); print('\n');
print("tBsB = "); print(tBsB.layout()); print('\n');
print("tCsA = "); print(tCsA.layout()); print('\n');
print("tCsB = "); print(tCsB.layout()); print('\n');
print("tCrC = "); print(tCrC.layout()); print('\n');
print("tCsC = "); print(tCsC.layout()); print('\n');
print("tDsC = "); print(tDsC.layout()); print('\n');
print("tDgC = "); print(tDgC.layout()); print('\n');
print("gmem tiled copy A = "); print(gmem_tiled_copy_A); print('\n');
print("gmem tiled copy B = "); print(gmem_tiled_copy_B); print('\n');
print("gmem tiled copy C = "); print(gmem_tiled_copy_C); print('\n');
print("k_tile_count = "); print(size<2>(gA)); print('\n');
print("k_tile_iter = "); print(*k_tile_iter); print('\n');
print("K_BLOCK_MAX = "); print(K_BLOCK_MAX); print('\n');
}
#endif
}
};
template <class TensorFlt, class TensorAct, class TensorOut>
inline int
fprop_reference(
TensorFlt mStencil, // Logical MK: ( K, (C,T,R,S))
TensorAct mActivation, // Logical NK: ((N,Z,P,Q), (C,T,R,S))
TensorOut mOutput, // Logical MN: ( K, (N,Z,P,Q))
TensorOut mOutputRef) {
int32_t N = size<1,0>(mOutputRef);
int32_t Z = size<1,1>(mOutputRef);
int32_t P = size<1,2>(mOutputRef);
int32_t Q = size<1,3>(mOutputRef);
int32_t T = size<1,1>(mStencil);
int32_t R = size<1,2>(mStencil);
int32_t S = size<1,3>(mStencil);
int32_t C = size<1,0>(mStencil);
size_t K = static_cast<size_t>(size<0>(mOutputRef));
size_t NZPQ = static_cast<size_t>(size<1>(mOutputRef));
size_t CTRS = static_cast<size_t>(size<1>(mStencil));
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (size_t logical_m = 0; logical_m < K; ++logical_m) {
for (size_t logical_n = 0; logical_n < NZPQ; ++logical_n) {
auto accumulator = float(0);
for (size_t logical_k = 0; logical_k < CTRS; ++logical_k) {
accumulator += mStencil(logical_m, logical_k) * mActivation(logical_n, logical_k);
}
mOutputRef(logical_m, logical_n) = accumulator;
}
}
return print_relative_error(mOutput, mOutputRef, /*print_verbose*/ false, /*print_error*/ true, /*error_margin*/ 0.01);
}
| examples/59_ampere_gather_scatter_conv/ampere_conv_kernel.h/0 | {
"file_path": "examples/59_ampere_gather_scatter_conv/ampere_conv_kernel.h",
"repo_id": "examples",
"token_count": 5630
} | 16 |
<jupyter_start><jupyter_text>Exporting a CUTLASS grouped GEMM kernel to a PyTorch CUDA extensionThis notebook walks through a basic example of using the CUTLASS Python interface to declare a grouped GEMM kernel and export it as a PyTorch CUDA extension. Note that GEMM and Conv2d can also be exported as PyTorch CUDA extensions. [](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/02_pytorch_extension_grouped_gemm.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface and PyTorch. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass torch --extra-index-url https://download.pytorch.org/whl/cu121<jupyter_output><empty_output><jupyter_text>Background on grouped GEMMGrouped GEMM enables one to execute a set of GEMMs (each with potentially different sizes and strides) in a single CUDA kernel. It can be thought of as a generalized version of a pointer-array GEMM, without the requirement that the sizes and strides of each GEMM be the same.For example, if one has `p` GEMMs with sizes:```textM_1 x N_1 x K_1M_2 x N_2 x K_2...M_p x N_p x K_p```CUTLASS's grouped GEMM will execute these in a single CUDA kernel.Grouped GEMM is particularly beneficial for saturating the GPU with many small problems that would insufficiently utilize the device in isolation. Declaring a grouped GEMM via the CUTLASS Python interfaceA grouped GEMM operation is declared similarly to a GEMM operation in the CUTLASS Python interface: one simply calls `cutlass.op.GroupedGemm`.<jupyter_code>import cutlass
import torch
dtype = torch.float16
plan = cutlass.op.GroupedGemm(element=dtype, layout=cutlass.LayoutType.RowMajor)<jupyter_output><empty_output><jupyter_text>We can then compile and run this operation on a group of GEMMs. We'll first set up some utility functions to initialize GEMMs.<jupyter_code>import random
random.seed(2023)
# Utility function to initialize A, B, C, and D matrices corresponding to dimensions M, N, and K
def initialize(dtype, M, N, K):
sizes = [(M, K), (K, N), (M, N), (M, N)]
return [torch.randint(-3, 3, size, device='cuda').to(dtype) for size in sizes]
# Utility function to generate `problems` GEMMs of random sizes
def generate_problems(problems):
valid_sizes = [128, 256, 512, 1024]
As, Bs, Cs, Ds = [], [], [], []
for _ in range(problems):
M, N, K = [random.choice(valid_sizes) for _ in range(3)]
A, B, C, D = initialize(dtype, M, N, K)
As.append(A)
Bs.append(B)
Cs.append(C)
Ds.append(D)
return As, Bs, Cs, Ds<jupyter_output><empty_output><jupyter_text>We'll next run a group of 20 GEMMs via the CUTLASS Python interface and via PyTorch.<jupyter_code>As, Bs, Cs, Ds, = generate_problems(20)
plan.run(As, Bs, Cs, Ds, print_module=True)
Ds_torch = [a @ b for a, b in zip(As, Bs)]
for d, d_torch in zip(Ds, Ds_torch):
assert torch.allclose(d, d_torch)<jupyter_output><empty_output><jupyter_text>Exporting the CUTLASS kernel to a PyTorch CUDA extensionThe procedure above allows one to quickly experiment with using CUTLASS kernels. However, one might prefer to use the CUTLASS kernel via a [PyTorch CUDA extension](https://pytorch.org/tutorials/advanced/cpp_extension.html). This avoids adding any runtime overheads associated with the Python portions of the CUTLASS Python interface.The CUTLASS Python interface provides simple solutions for creating PyTorch CUDA extensions for a CUTLASS kernel. These extensions can either be written out for a later "ahead-of-time" compilation, or be just-in-time compiled and returned to the user.To create a JIT-compiled module from the CUTLASS kernel we defined above, simply call the following:<jupyter_code>op = plan.construct()
grouped_gemm = cutlass.emit.pytorch(op, name='grouped_gemm', cc=plan.cc, sourcedir='out', jit=True)<jupyter_output><empty_output><jupyter_text>The `cutlass.emit.pytorch` function emits:* `out/grouped_gemm_kernel.cu`: This file contains the declaration of the CUTLASS kernel and a method to call it from PyTorch tensors* `out/grouped_gemm.cpp`: This file contains a C++ wrapper around the aforementioned CUTLASS kernel* `setup.py`: This file contains the `setuptools` script for building and installing the generated extensionThe extension can be built from within the `out` directory by running:```bashTORCH_CUDA_ARCH_LIST="8.0" python setup.py install```Where `TORCH_CUDA_ARCH_LIST` is set to the compute capability of the device on which the kernel will be run.See the PyTorch ["Custom C++ and CUDA Extensions"](https://pytorch.org/tutorials/advanced/cpp_extension.html) tutorial for more details on this.The PyTorch CUDA extension could be built for this module by running:```bashcd outTORCH_CUDA_ARCH_LIST="8.0" python setup.py```(assuming that one is building for SM80)One could then use the kernel in a later PyTorch module by running:```pythonimport torchimport grouped_gemmgrouped_gemm.run(As, Bs)```In this case, however, we set `jit=True`, which specifies that we would like to compile and load the PyTorch CUDA extension on the fly.Under the hood, this leverages the [torch.utils.cpp_extension.load](https://pytorch.org/tutorials/advanced/cpp_extension.html) method and returns the loaded extension.We can then use the extension and compare its results to running the GEMMs via vanilla PyTorch GEMMs:<jupyter_code>Ds = grouped_gemm.run(As, Bs)
Ds_torch = [a @ b for a, b in zip(As, Bs)]
for d, d_torch in zip(Ds, Ds_torch):
assert torch.allclose(d, d_torch)<jupyter_output><empty_output><jupyter_text>Finally, we can profile our grouped GEMM extension:<jupyter_code>num_warmup = 20
num_profile = 100
# Warmup iterations
for _ in range(num_warmup):
Ds = grouped_gemm.run(As, Bs)
Ds_torch = [a @ b for a, b in zip(As, Bs)]
torch.cuda.synchronize()
# Timing iterations
import time
grouped = 0
nongrouped = 0
for _ in range(num_profile):
start = time.time()
Ds = grouped_gemm.run(As, Bs)
torch.cuda.synchronize()
grouped += time.time() - start
start = time.time()
Ds_torch = [a @ b for a, b in zip(As, Bs)]
torch.cuda.synchronize()
nongrouped += time.time() - start
print('Grouped: {:.3f} us'.format(grouped * 1e6/num_profile))
print('Non-Grouped: {:.3f} us'.format(nongrouped * 1e6/num_profile))
print('Speedup: {:.3f}'.format(nongrouped / grouped))<jupyter_output><empty_output> | examples/python/02_pytorch_extension_grouped_gemm.ipynb/0 | {
"file_path": "examples/python/02_pytorch_extension_grouped_gemm.ipynb",
"repo_id": "examples",
"token_count": 2266
} | 17 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && \
((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))))
# define CUTE_ARCH_CLUSTER_SM90_ENABLED
#endif
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && (__CUDACC_VER_MAJOR__ >= 12))
# define CUTE_ARCH_ELECT_ONE_SM90_ENABLED
#endif
namespace cute {
CUTE_DEVICE void cluster_arrive_relaxed()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
asm volatile("barrier.cluster.arrive.relaxed.aligned;\n" : : );
#else
CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined");
#endif
}
CUTE_DEVICE void cluster_arrive()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
asm volatile("barrier.cluster.arrive.aligned;\n" : : );
#else
CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined");
#endif
}
CUTE_DEVICE void cluster_wait()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
asm volatile("barrier.cluster.wait.aligned;\n" : : );
#else
CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined");
#endif
}
CUTE_DEVICE void cluster_sync()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
cluster_arrive();
cluster_wait();
#else
CUTE_INVALID_CONTROL_PATH("CUTE_ARCH_CLUSTER_SM90_ENABLED is not defined");
#endif
}
// Returns the dim3 grid size in terms of number of clusters.
CUTE_DEVICE dim3 cluster_grid_dims()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t x, y, z;
asm volatile("mov.u32 %0, %%nclusterid.x;\n" : "=r"(x) : );
asm volatile("mov.u32 %0, %%nclusterid.y;\n" : "=r"(y) : );
asm volatile("mov.u32 %0, %%nclusterid.z;\n" : "=r"(z) : );
return {x, y, z};
#elif defined(__CUDA_ARCH__)
// MSVC requires protecting use of gridDim with __CUDA_ARCH__.
return gridDim;
#elif defined(_MSC_VER)
CUTE_INVALID_CONTROL_PATH("cluster_grid_dims() can only be called on device");
return {0, 0, 0};
#else
return {0, 0, 0};
#endif
}
// Returns the dim3 cluster rank in the grid.
CUTE_DEVICE dim3 cluster_id_in_grid()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t x, y, z;
asm volatile("mov.u32 %0, %%clusterid.x;\n" : "=r"(x) : );
asm volatile("mov.u32 %0, %%clusterid.y;\n" : "=r"(y) : );
asm volatile("mov.u32 %0, %%clusterid.z;\n" : "=r"(z) : );
return {x, y, z};
#elif defined(__CUDA_ARCH__)
// MSVC requires protecting use of blockIdx with __CUDA_ARCH__.
return blockIdx;
#elif defined(_MSC_VER)
CUTE_INVALID_CONTROL_PATH("cluster_id_in_grid() can only be called on device");
return {0, 0, 0};
#else
return {0, 0, 0};
#endif
}
// Returns the relative dim3 block rank local to the cluster.
CUTE_DEVICE dim3 block_id_in_cluster()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t x, y, z;
asm volatile("mov.u32 %0, %%cluster_ctaid.x;\n" : "=r"(x) : );
asm volatile("mov.u32 %0, %%cluster_ctaid.y;\n" : "=r"(y) : );
asm volatile("mov.u32 %0, %%cluster_ctaid.z;\n" : "=r"(z) : );
return {x, y, z};
#else
return {0,0,0};
#endif
}
// Returns the dim3 cluster shape.
CUTE_DEVICE dim3 cluster_shape()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t x, y, z;
asm volatile("mov.u32 %0, %%cluster_nctaid.x;\n" : "=r"(x) : );
asm volatile("mov.u32 %0, %%cluster_nctaid.y;\n" : "=r"(y) : );
asm volatile("mov.u32 %0, %%cluster_nctaid.z;\n" : "=r"(z) : );
return {x, y, z};
#else
return {1,1,1};
#endif
}
// Get 1D ctaid in a cluster.
CUTLASS_DEVICE uint32_t block_rank_in_cluster()
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t rank;
asm volatile("mov.u32 %0, %%cluster_ctarank;\n" : "=r"(rank) :);
return rank;
#else
return 0;
#endif
}
// Set the destination block-ID in cluster for a given SMEM Address
CUTLASS_DEVICE uint32_t set_block_rank(uint32_t smemAddr, uint32_t rank)
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t result;
asm volatile("mapa.shared::cluster.u32 %0, %1, %2;\n"
: "=r"(result)
: "r"(smemAddr), "r"(rank));
return result;
#else
return smemAddr;
#endif
}
// Elect one thread in the warp. The elected thread gets its predicate set to true, all others obtain false.
CUTE_HOST_DEVICE uint32_t elect_one_sync()
{
#if defined(CUTE_ARCH_ELECT_ONE_SM90_ENABLED)
uint32_t pred = 0;
uint32_t laneid = 0;
asm volatile(
"{\n"
".reg .b32 %%rx;\n"
".reg .pred %%px;\n"
" elect.sync %%rx|%%px, %2;\n"
"@%%px mov.s32 %1, 1;\n"
" mov.s32 %0, %%rx;\n"
"}\n"
: "+r"(laneid), "+r"(pred)
: "r"(0xFFFFFFFF));
return pred;
#elif defined(__CUDA_ARCH__)
return (threadIdx.x % 32) == 0;
#else
return true;
#endif
}
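// Example usage (editorial, illustrative sketch):
//   if (elect_one_sync()) {
//     // exactly one lane of the warp executes this, e.g. to issue a TMA copy
//     // or an mbarrier arrive on behalf of the whole warp
//   }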
struct ElectOneLaneIdReturnType {
uint32_t is_leader;
uint32_t leader_lane_id;
};
CUTE_HOST_DEVICE
ElectOneLaneIdReturnType
elect_one_leader_sync()
{
#if defined(CUTE_ARCH_ELECT_ONE_SM90_ENABLED)
uint32_t pred = 0;
uint32_t laneid = 0;
asm volatile(
"{\n"
".reg .b32 %%rx;\n"
".reg .pred %%px;\n"
" elect.sync %%rx|%%px, %2;\n"
"@%%px mov.s32 %1, 1;\n"
" mov.s32 %0, %%rx;\n"
"}\n"
: "+r"(laneid), "+r"(pred)
: "r"(0xFFFFFFFF));
return {pred, laneid};
#elif defined(__CUDA_ARCH__)
return {(threadIdx.x % 32) == 0, 0};
#else
return {true, 0};
#endif
}
// Store value to remote shared memory in the cluster
CUTE_DEVICE
void
store_shared_remote(uint32_t value, uint32_t smem_addr, uint32_t mbarrier_addr, uint32_t dst_cta_rank)
{
#if defined(CUTE_ARCH_CLUSTER_SM90_ENABLED)
uint32_t dsmem_addr = set_block_rank(smem_addr, dst_cta_rank);
uint32_t remote_barrier_addr = set_block_rank(mbarrier_addr, dst_cta_rank);
asm volatile("st.async.shared::cluster.mbarrier::complete_tx::bytes.u32 [%0], %1, [%2];"
: : "r"(dsmem_addr), "r"(value), "r"(remote_barrier_addr));
#endif
}
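// Example usage (editorial, illustrative sketch; the variable names are hypothetical
// and the destination CTA's mbarrier is assumed to have been initialized with the
// matching transaction byte count):
//   uint32_t peer_rank = block_rank_in_cluster() ^ 1;            // hypothetical partner CTA
//   uint32_t smem_u32  = cast_smem_ptr_to_uint(&smem_value);     // see cute/arch/util.hpp
//   uint32_t mbar_u32  = cast_smem_ptr_to_uint(&smem_mbarrier);
//   store_shared_remote(value, smem_u32, mbar_u32, peer_rank);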
} // end namespace cute
| include/cute/arch/cluster_sm90.hpp/0 | {
"file_path": "include/cute/arch/cluster_sm90.hpp",
"repo_id": "include",
"token_count": 3131
} | 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/numeric/integer_sequence.hpp>
#if defined(__clang__) && defined(__CUDA__)
// __cvta_generic_to_shared was added in Clang 14: https://reviews.llvm.org/D111665
#if __clang_major__ >= 14
#define CUTE_CLANG_SUPPORTS_CVTA_GENERIC_TO_SHARED 1
#endif
// __nvvm_get_smem_pointer added in Clang 14: https://reviews.llvm.org/D111665
// ... but will not work on Windows until Clang 15: https://reviews.llvm.org/D122897
#if (!defined(_WIN32) && __clang_major__ >= 14) || __clang_major__ >= 15
#define CUTE_CLANG_SUPPORTS_NVVM_GET_SMEM_POINTER 1
#endif
#endif
#if defined(__NVCC__) || defined(__CUDACC_RTC__)
// __cvta_generic_to_shared added in CUDA 11+
#if __CUDACC_VER_MAJOR__ >= 11
#define CUTE_NVCC_SUPPORTS_CVTA_GENERIC_TO_SHARED 1
#endif
// __nvvm_get_smem_pointer added in CUDA 10.2
#if __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2
#define CUTE_NVCC_SUPPORTS_NVVM_GET_SMEM_POINTER 1
#endif
#endif
#if CUTE_NVCC_SUPPORTS_CVTA_GENERIC_TO_SHARED || CUTE_CLANG_SUPPORTS_CVTA_GENERIC_TO_SHARED
#define CUTE_CVTA_GENERIC_TO_SHARED_SUPPORTED 1
#endif
#if !defined(CUTE_CVTA_GENERIC_TO_SHARED_ACTIVATED) && CUTE_CVTA_GENERIC_TO_SHARED_SUPPORTED && defined(__CUDA_ARCH__)
#define CUTE_CVTA_GENERIC_TO_SHARED_ACTIVATED 1
#endif
#if CUTE_NVCC_SUPPORTS_NVVM_GET_SMEM_POINTER || CUTE_CLANG_SUPPORTS_NVVM_GET_SMEM_POINTER
#define CUTE_NVVM_GET_SMEM_POINTER_SUPPORTED 1
#endif
#if !defined(CUTE_NVVM_GET_SMEM_POINTER_ACTIVATED) && CUTE_NVVM_GET_SMEM_POINTER_SUPPORTED && defined(__CUDA_ARCH__)
#define CUTE_NVVM_GET_SMEM_POINTER_ACTIVATED 1
#endif
// Clang 14+ provides a declaration of __nvvm_get_smem_pointer, so we only need
// to provide one for NVCC
#if CUTE_NVCC_SUPPORTS_NVVM_GET_SMEM_POINTER
extern "C" {
// This NVVM intrinsic is subject to change in future versions of CUDA.
// Clients should not call it directly.
CUTE_DEVICE uint32_t __nvvm_get_smem_pointer(void*);
}
#endif
namespace cute
{
/// CUTE helper to cast SMEM pointer to unsigned
CUTE_DEVICE
uint32_t
cast_smem_ptr_to_uint(void const* const ptr)
{
// We prefer to use the new CVTA intrinsics if they are available, otherwise we will fall back to
// the previous internal intrinsics if they are available.
#if CUTE_CVTA_GENERIC_TO_SHARED_ACTIVATED
//
// This NVVM intrinsic converts an address in shared memory to a plain
// unsigned integer. This is necessary to pass to shared memory instructions
// in inline PTX.
//
// In CUDA 11 and beyond, this replaces __nvvm_get_smem_pointer() [only available in 10.2].
//
//__device__ size_t __cvta_generic_to_shared(void* ptr);
/// CUTE helper to get SMEM pointer
return static_cast<uint32_t>(__cvta_generic_to_shared(ptr));
#elif CUTE_NVVM_GET_SMEM_POINTER_ACTIVATED
return __nvvm_get_smem_pointer(ptr);
#elif defined(__CUDA_ARCH__)
uint32_t smem_ptr;
asm(
"{ .reg .u64 smem_ptr; cvta.to.shared.u64 smem_ptr, %1; cvt.u32.u64 %0, smem_ptr; }\n"
: "=r"(smem_ptr) : "l"(ptr));
return smem_ptr;
#else
(void) ptr;
printf("ERROR: cast_smem_ptr_to_uint not supported but used.\n");
return 0;
#endif
}
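// Example usage (editorial, illustrative sketch):
//   __shared__ float buf[128];
//   uint32_t smem_addr = cast_smem_ptr_to_uint(&buf[0]);
//   // smem_addr can now be used as an operand of inline-PTX shared-memory
//   // instructions (e.g. cp.async, ldmatrix, mbarrier operations).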
namespace detail {
//
// Wrapper for MMAOp::fma
//
template <class MmaOp>
struct CallFMA {
template <class... Args>
CUTE_HOST_DEVICE constexpr void
operator()(Args&&... args) const {
return MmaOp::fma(static_cast<Args&&>(args)...);
}
};
//
// Wrapper for CopyOp::copy
//
template <class CopyOp>
struct CallCOPY {
template <class... Args>
CUTE_HOST_DEVICE constexpr void
operator()(Args&&... args) const {
return CopyOp::copy(static_cast<Args&&>(args)...);
}
};
//
// Utility for exploding pointers/arrays/tensors into functions
//
template <class Fn,
class PtrA, int... I>
CUTE_HOST_DEVICE constexpr
void
explode(Fn fn,
PtrA&& a, int_sequence<I...>)
{
return fn(a[I]...);
}
template <class Fn,
class PtrS, int... Is,
class PtrD, int... Id>
CUTE_HOST_DEVICE constexpr
void
explode(Fn fn,
PtrS&& s, int_sequence<Is...>,
PtrD&& d, int_sequence<Id...>)
{
return fn(s[Is]..., d[Id]...);
}
template <class Fn,
class PtrA, int... Ia,
class PtrB, int... Ib,
class PtrC, int... Ic>
CUTE_HOST_DEVICE constexpr
void
explode(Fn fn,
PtrA&& a, int_sequence<Ia...>,
PtrB&& b, int_sequence<Ib...>,
PtrC&& c, int_sequence<Ic...>)
{
return fn(a[Ia]..., b[Ib]..., c[Ic]...);
}
template <class Fn,
class PtrD, int... Id,
class PtrA, int... Ia,
class PtrB, int... Ib,
class PtrC, int... Ic>
CUTE_HOST_DEVICE constexpr
void
explode(Fn fn,
PtrD&& d, int_sequence<Id...>,
PtrA&& a, int_sequence<Ia...>,
PtrB&& b, int_sequence<Ib...>,
PtrC&& c, int_sequence<Ic...>)
{
return fn(d[Id]..., a[Ia]..., b[Ib]..., c[Ic]...);
}
template <class Fn,
class PtrD, int... Id,
class PtrA, int... Ia,
class PtrB, int... Ib,
class PtrC, int... Ic,
class PtrE, int... Ie>
CUTE_HOST_DEVICE constexpr
void
explode(Fn fn,
PtrD&& d, int_sequence<Id...>,
PtrA&& a, int_sequence<Ia...>,
PtrB&& b, int_sequence<Ib...>,
PtrC&& c, int_sequence<Ic...>,
PtrE&& e, int_sequence<Ie...>)
{
return fn(d[Id]..., a[Ia]..., b[Ib]..., c[Ic]..., e[Ie]...);
}
template <class Fn,
class PtrD, int... Id,
class PtrA, int... Ia,
class PtrB, int... Ib,
class PtrC, int... Ic,
class PtrSFA, int... Isfa,
class PtrSFB, int... Isfb>
CUTE_HOST_DEVICE constexpr
void
explode(Fn fn,
PtrD&& d, int_sequence<Id...>,
PtrA&& a, int_sequence<Ia...>,
PtrB&& b, int_sequence<Ib...>,
PtrC&& c, int_sequence<Ic...>,
PtrSFA&& sfa, int_sequence<Isfa...>,
PtrSFB&& sfb, int_sequence<Isfb...>)
{
return fn(d[Id]..., a[Ia]..., b[Ib]..., c[Ic]..., sfa[Isfa]..., sfb[Isfb]...);
}
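//
// Editorial example (illustrative sketch): explode() is what expands fragment
// pointers into the scalar operands of an MMA wrapper. For hypothetical fragment
// pointers d/a/b/c with 4/4/2/4 elements:
//
//   explode(CallFMA<MmaOp>{},
//           d, make_int_sequence<4>{},
//           a, make_int_sequence<4>{},
//           b, make_int_sequence<2>{},
//           c, make_int_sequence<4>{});
//
// calls MmaOp::fma(d[0],...,d[3], a[0],...,a[3], b[0],b[1], c[0],...,c[3]).
//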
//
// Utility for exploding tuples into functions
//
template <class Fn,
class TupleA, int... I>
CUTE_HOST_DEVICE constexpr
void
explode_tuple(Fn fn,
TupleA&& a, int_sequence<I...>)
{
return fn(get<I>(a)...);
}
template <class Fn,
class TupleA, int... Ia,
class TupleB, int... Ib>
CUTE_HOST_DEVICE constexpr
void
explode_tuple(Fn fn,
TupleA&& a, int_sequence<Ia...>,
TupleB&& b, int_sequence<Ib...>)
{
return fn(get<Ia>(a)..., get<Ib>(b)...);
}
template <class Fn,
class TupleA, int... Ia,
class TupleB, int... Ib,
class TupleC, int... Ic>
CUTE_HOST_DEVICE constexpr
void
explode_tuple(Fn fn,
TupleA&& a, int_sequence<Ia...>,
TupleB&& b, int_sequence<Ib...>,
TupleC&& c, int_sequence<Ic...>)
{
return fn(get<Ia>(a)..., get<Ib>(b)..., get<Ic>(c)...);
}
} // end namespace detail
} // end namespace cute
| include/cute/arch/util.hpp/0 | {
"file_path": "include/cute/arch/util.hpp",
"repo_id": "include",
"token_count": 3703
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#endif
#include <cutlass/numeric_types.h>
namespace cute
{
//
// Signed integers
//
using int2_t = cutlass::int2b_t;
using int4_t = cutlass::int4b_t;
using CUTE_STL_NAMESPACE::int8_t;
using CUTE_STL_NAMESPACE::int16_t;
using CUTE_STL_NAMESPACE::int32_t;
using CUTE_STL_NAMESPACE::int64_t;
template <int N> struct int_bit;
template <> struct int_bit< 2> { using type = cutlass::int2b_t; };
template <> struct int_bit< 4> { using type = cutlass::int4b_t; };
template <> struct int_bit< 8> { using type = int8_t; };
template <> struct int_bit< 16> { using type = int16_t; };
template <> struct int_bit< 32> { using type = int32_t; };
template <> struct int_bit< 64> { using type = int64_t; };
template <int N>
using int_bit_t = typename int_bit<N>::type;
template <int N>
using int_byte = int_bit<8*N>;
template <int N>
using int_byte_t = typename int_byte<N>::type;
//
// Unsigned integers
//
using uint1_t = cutlass::uint1b_t;
using uint2_t = cutlass::uint2b_t;
using uint4_t = cutlass::uint4b_t;
using CUTE_STL_NAMESPACE::uint8_t;
using CUTE_STL_NAMESPACE::uint16_t;
using CUTE_STL_NAMESPACE::uint32_t;
using CUTE_STL_NAMESPACE::uint64_t;
using cutlass::uint128_t;
template <int N> struct uint_bit;
template <> struct uint_bit< 1> { using type = cutlass::uint1b_t; };
template <> struct uint_bit< 2> { using type = cutlass::uint2b_t; };
template <> struct uint_bit< 4> { using type = cutlass::uint4b_t; };
template <> struct uint_bit< 8> { using type = uint8_t; };
template <> struct uint_bit< 16> { using type = uint16_t; };
template <> struct uint_bit< 32> { using type = uint32_t; };
template <> struct uint_bit< 64> { using type = uint64_t; };
template <> struct uint_bit<128> { using type = cutlass::uint128_t; };
template <int N>
using uint_bit_t = typename uint_bit<N>::type;
template <int N>
using uint_byte = uint_bit<8*N>;
template <int N>
using uint_byte_t = typename uint_byte<N>::type;
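// Examples (editorial, illustrative only):
//   uint_bit_t<8> is uint8_t, uint_bit_t<128> is cutlass::uint128_t,
//   int_byte_t<4> is int32_t (int_byte<N> is simply int_bit<8*N>).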
} // namespace cute
| include/cute/numeric/int.hpp/0 | {
"file_path": "include/cute/numeric/int.hpp",
"repo_id": "include",
"token_count": 1299
} | 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/integer_sequence.hpp>
namespace cute
{
// For slicing
struct Underscore : Int<0> {};
CUTE_INLINE_CONSTANT Underscore _;
// Convenient alias
using X = Underscore;
// Treat Underscore as an integral like integral_constant
template <>
struct is_integral<Underscore> : true_type {};
template <class T>
struct is_underscore : false_type {};
template <>
struct is_underscore<Underscore> : true_type {};
// Tuple trait for detecting static member element
template <class Tuple, class Elem, class Enable = void>
struct has_elem : false_type {};
template <class Elem>
struct has_elem<Elem, Elem> : true_type {};
template <class Tuple, class Elem>
struct has_elem<Tuple, Elem, enable_if_t<is_tuple<Tuple>::value> >
: has_elem<Tuple, Elem, tuple_seq<Tuple> > {};
template <class Tuple, class Elem, int... Is>
struct has_elem<Tuple, Elem, seq<Is...>>
: disjunction<has_elem<tuple_element_t<Is, Tuple>, Elem>...> {};
// Tuple trait for detecting that every leaf matches a given static element
template <class Tuple, class Elem, class Enable = void>
struct all_elem : false_type {};
template <class Elem>
struct all_elem<Elem, Elem> : true_type {};
template <class Tuple, class Elem>
struct all_elem<Tuple, Elem, enable_if_t<is_tuple<Tuple>::value> >
: all_elem<Tuple, Elem, tuple_seq<Tuple> > {};
template <class Tuple, class Elem, int... Is>
struct all_elem<Tuple, Elem, seq<Is...>>
: conjunction<all_elem<tuple_element_t<Is, Tuple>, Elem>...> {};
// Tuple trait for detecting Underscore member
template <class Tuple>
using has_underscore = has_elem<Tuple, Underscore>;
template <class Tuple>
using all_underscore = all_elem<Tuple, Underscore>;
template <class Tuple>
using has_int1 = has_elem<Tuple, Int<1>>;
template <class Tuple>
using has_int0 = has_elem<Tuple, Int<0>>;
//
// Slice keeps only the elements of Tuple B that are paired with an Underscore
//
namespace detail {
template <class A, class B>
CUTE_HOST_DEVICE constexpr
auto
lift_slice(A const& a, B const& b)
{
if constexpr (is_tuple<A>::value) {
static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks");
return filter_tuple(a, b, [](auto const& x, auto const& y) { return lift_slice(x,y); });
} else if constexpr (is_underscore<A>::value) {
return cute::tuple<B>{b};
} else {
return cute::tuple<>{};
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Entry point overrides the lifting so that slice(_,b) == b
template <class A, class B>
CUTE_HOST_DEVICE constexpr
auto
slice(A const& a, B const& b)
{
if constexpr (is_tuple<A>::value) {
static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks");
return filter_tuple(a, b, [](auto const& x, auto const& y) { return detail::lift_slice(x,y); });
} else if constexpr (is_underscore<A>::value) {
return b;
} else {
return cute::tuple<>{};
}
CUTE_GCC_UNREACHABLE;
}
//
// Dice keeps only the elements of Tuple B that are paired with an Int
//
namespace detail {
template <class A, class B>
CUTE_HOST_DEVICE constexpr
auto
lift_dice(A const& a, B const& b)
{
if constexpr (is_tuple<A>::value) {
static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks");
return filter_tuple(a, b, [](auto const& x, auto const& y) { return lift_dice(x,y); });
} else if constexpr (is_underscore<A>::value) {
return cute::tuple<>{};
} else {
return cute::tuple<B>{b};
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Entry point overrides the lifting so that dice(1,b) == b
template <class A, class B>
CUTE_HOST_DEVICE constexpr
auto
dice(A const& a, B const& b)
{
if constexpr (is_tuple<A>::value) {
static_assert(tuple_size<A>::value == tuple_size<B>::value, "Mismatched Ranks");
return filter_tuple(a, b, [](auto const& x, auto const& y) { return detail::lift_dice(x,y); });
} else if constexpr (is_underscore<A>::value) {
return cute::tuple<>{};
} else {
return b;
}
CUTE_GCC_UNREACHABLE;
}
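// Editorial example (illustrative): with a = (_, 3) and b = (2, 7),
//   slice(a, b) keeps the entries of b paired with an Underscore -> a tuple holding 2
//   dice (a, b) keeps the entries of b paired with an integer    -> a tuple holding 7
// This is the mechanism behind tensor slicing expressions such as tensor(_, i).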
//
// Display utilities
//
CUTE_HOST_DEVICE void print(Underscore const&) {
printf("_");
}
#if !defined(__CUDACC_RTC__)
CUTE_HOST std::ostream& operator<<(std::ostream& os, Underscore const&) {
return os << "_";
}
#endif
} // end namespace cute
| include/cute/underscore.hpp/0 | {
"file_path": "include/cute/underscore.hpp",
"repo_id": "include",
"token_count": 2184
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply for SM75
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
// CUDA Toolkit includes for nvcuda::wmma needed for binarized matrix multiply.
#include <mma.h>
#include "cutlass/wmma_array.h"
#endif
// CUTLASS includes
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))
#define CUTLASS_ARCH_MMA_SM75_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750))
#define CUTLASS_ARCH_MMA_SM75_ENABLED
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - FP16 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 2>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0,%1}, {%2,%3}, {%4}, {%5,%6};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 2>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]),
"r"(B[0]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
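// Example usage (editorial, illustrative sketch; device code, SM75 or newer):
//   using Mma = cutlass::arch::Mma<gemm::GemmShape<16, 8, 8>, 32,
//                                  half_t, layout::RowMajor,
//                                  half_t, layout::ColumnMajor,
//                                  float, layout::RowMajor, OpMultiplyAdd>;
//   Mma::FragmentA a; Mma::FragmentB b; Mma::FragmentC c, d;
//   // ... each thread of the warp fills its fragments ...
//   Mma{}(d, a, b, c);  // issues one warp-wide mma.sync.aligned.m16n8k8 ... f32.f16.f16.f32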
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply .8816 (8b)
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s8.u8 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (8b) with SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (4b)
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
int4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
uint4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
int4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
uint4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (4b) - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
int4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
uint4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.u4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
int4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 32>,
32,
uint4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8, 8, 32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// b1 ^ b1 + s32 => s32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma<
gemm::GemmShape<8,8,128>,
32,
uint1b_t,
layout::RowMajor,
uint1b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpXorPopc> {
using Shape = gemm::GemmShape<8,8,128>;
using ElementA = uint1b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint1b_t, 32>;
using ElementB = uint1b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint1b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpXorPopc;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
using WmmaFragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
nvcuda::wmma::experimental::precision::b1,
nvcuda::wmma::row_major>;
using WmmaFragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
nvcuda::wmma::experimental::precision::b1,
nvcuda::wmma::col_major>;
using WmmaFragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
int>;
WmmaFragmentA const & A = reinterpret_cast<WmmaFragmentA const &>(a);
WmmaFragmentB const & B = reinterpret_cast<WmmaFragmentB const &>(b);
WmmaFragmentC const & C = reinterpret_cast<WmmaFragmentC const &>(c);
WmmaFragmentC & D = reinterpret_cast<WmmaFragmentC &>(d);
nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR,
nvcuda::wmma::experimental::bmmaAccumulateOpPOPC);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
CUTLASS_NOT_IMPLEMENTED(); // WMMA must be supported to issue binary matrix multiply-accumulate instructions.
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
#endif
}
};
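// Illustrative usage sketch (not part of the original header): invoking the binary
// (XOR + population count) atom defined above. This path additionally requires
// CUTLASS_ARCH_WMMA_ENABLED, since it is implemented on top of nvcuda::wmma::bmma_sync.
// The function name `example_b1_xor_popc_mma` is hypothetical.
CUTLASS_DEVICE
void example_b1_xor_popc_mma() {

  using ArchMma = Mma<
    gemm::GemmShape<8, 8, 128>,
    32,
    uint1b_t, layout::RowMajor,
    uint1b_t, layout::ColumnMajor,
    int, layout::RowMajor,
    OpXorPopc>;

  ArchMma::FragmentA a;   // 32 x 1-bit operands held by this thread
  ArchMma::FragmentB b;   // 32 x 1-bit operands held by this thread
  ArchMma::FragmentC c;   // 2 x int32_t accumulators held by this thread

  a.clear();
  b.clear();
  c.clear();

  ArchMma mma;
  mma(c, a, b, c);        // c += popc(a XOR b), per the bmma_sync call above
}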
////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/mma_sm75.h/0 | {
"file_path": "include/cutlass/arch/mma_sm75.h",
"repo_id": "include",
"token_count": 13405
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
and is safe to use in a union.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array for any data type
template <
typename T,
int N
>
struct Array<T, N, false> {
static constexpr int kSizeBits = sizeof_bits<T>::value * N;
/// Storage type
using Storage = typename platform::conditional<
((kSizeBits % 32) != 0),
typename platform::conditional<
((kSizeBits % 16) != 0),
uint8_t,
uint16_t
>::type,
uint32_t
>::type;
/// Element type
using Element = T;
/// Number of logical elements per stored object
static constexpr int kElementsPerStoredItem = int(sizeof(Storage) * 8) / sizeof_bits<T>::value;
/// Number of storage elements
static constexpr size_t kStorageElements = (N + kElementsPerStoredItem - 1) / kElementsPerStoredItem;
/// Number of logical elements
static constexpr size_t kElements = N;
/// Bitmask for covering one item
static constexpr Storage kMask = ((Storage(1) << sizeof_bits<T>::value) - 1);
//
// C++ standard members with pointer types removed
//
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type *pointer;
typedef value_type const *const_pointer;
//
// References
//
/// Reference object inserts or extracts sub-byte items
class reference {
/// Pointer to storage element
Storage *ptr_{nullptr};
/// Index into elements packed into Storage object
int idx_{0};
public:
reference() = default;
/// Ctor
CUTLASS_HOST_DEVICE
reference(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
/// Assignment
CUTLASS_HOST_DEVICE
reference &operator=(T x) {
// `*ptr_ & kUpdateMask` reads *ptr_ before writing to it.
// This means a code pattern like
//
// ```cpp
// Array<half_t, N> result;
// result[0] = xxx;
// ```
//
// will lead to a compiler warning about the use of an uninitialized member variable,
// although we know this read of the uninitialized member variable is harmless.
#if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wuninitialized"
#elif defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wuninitialized"
# pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
Storage item = (reinterpret_cast<Storage const &>(x) & kMask);
Storage kUpdateMask = Storage(~(kMask << (idx_ * sizeof_bits<T>::value)));
*ptr_ = Storage(((*ptr_ & kUpdateMask) | (item << idx_ * sizeof_bits<T>::value)));
#if defined(__clang__)
# pragma clang diagnostic pop
#elif defined(__GNUC__)
# pragma GCC diagnostic pop
#endif
return *this;
}
CUTLASS_HOST_DEVICE
T get() const {
Storage item = Storage((*ptr_ >> (idx_ * sizeof_bits<T>::value)) & kMask);
return reinterpret_cast<T const &>(item);
}
/// Extract
CUTLASS_HOST_DEVICE
operator T() const {
return get();
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
};
/// Reference object extracts sub-byte items
class const_reference {
/// Pointer to storage element
Storage const *ptr_{nullptr};
/// Index into elements packed into Storage object
int idx_{0};
public:
const_reference() = default;
/// Ctor
CUTLASS_HOST_DEVICE
const_reference(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
CUTLASS_HOST_DEVICE
const T get() const {
Storage item = (*ptr_ >> (idx_ * sizeof_bits<T>::value)) & kMask;
return reinterpret_cast<T const &>(item);
}
/// Extract
CUTLASS_HOST_DEVICE
operator T() const {
Storage item = Storage(Storage(*ptr_ >> Storage(idx_ * sizeof_bits<T>::value)) & kMask);
return reinterpret_cast<T const &>(item);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
};
//
// Iterators
//
/// Bidirectional iterator over elements
class iterator {
/// Pointer to storage element
Storage *ptr_{nullptr};
/// Index into elements packed into Storage object
int idx_{0};
public:
iterator() = default;
CUTLASS_HOST_DEVICE
iterator(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
CUTLASS_HOST_DEVICE
iterator &operator++() {
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return *this;
}
CUTLASS_HOST_DEVICE
iterator &operator--() {
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return *this;
}
CUTLASS_HOST_DEVICE
iterator operator++(int) {
iterator ret(*this);
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return ret;
}
CUTLASS_HOST_DEVICE
iterator operator--(int) {
iterator ret(*this);
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return ret;
}
CUTLASS_HOST_DEVICE
reference operator*() const {
return reference(ptr_, idx_);
}
CUTLASS_HOST_DEVICE
bool operator==(iterator const &other) const {
return ptr_ == other.ptr_ && idx_ == other.idx_;
}
CUTLASS_HOST_DEVICE
bool operator!=(iterator const &other) const {
return !(*this == other);
}
};
/// Bidirectional constant iterator over elements
class const_iterator {
/// Pointer to storage element
Storage const *ptr_{nullptr};
/// Index into elements packed into Storage object
int idx_{0};
public:
const_iterator() = default;
CUTLASS_HOST_DEVICE
const_iterator(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
CUTLASS_HOST_DEVICE
const_iterator &operator++() {
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return *this;
}
CUTLASS_HOST_DEVICE
const_iterator &operator--() {
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return *this;
}
CUTLASS_HOST_DEVICE
const_iterator operator++(int) {
const_iterator ret(*this);
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return ret;
}
CUTLASS_HOST_DEVICE
const_iterator operator--(int) {
const_iterator ret(*this);
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return ret;
}
CUTLASS_HOST_DEVICE
const_reference operator*() const {
return const_reference(ptr_, idx_);
}
CUTLASS_HOST_DEVICE
bool operator==(const_iterator const &other) const {
return ptr_ == other.ptr_ && idx_ == other.idx_;
}
CUTLASS_HOST_DEVICE
bool operator!=(const_iterator const &other) const {
return !(*this == other);
}
};
/// Bidirectional iterator over elements
class reverse_iterator {
/// Pointer to storage element
Storage *ptr_{nullptr};
/// Index into elements packed into Storage object
int idx_{0};
public:
reverse_iterator() = default;
CUTLASS_HOST_DEVICE
reverse_iterator(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
};
/// Bidirectional constant iterator over elements
class const_reverse_iterator {
/// Pointer to storage element
Storage const *ptr_{nullptr};
/// Index into elements packed into Storage object
int idx_{0};
public:
const_reverse_iterator() = default;
CUTLASS_HOST_DEVICE
const_reverse_iterator(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
};
/// Efficient clear method
CUTLASS_HOST_DEVICE
void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(kStorageElements); ++i) {
storage[i] = Storage(0);
}
}
CUTLASS_HOST_DEVICE
reference at(size_type pos) {
return reference(storage + pos / kElementsPerStoredItem, pos % kElementsPerStoredItem);
}
CUTLASS_HOST_DEVICE
const_reference at(size_type pos) const {
return const_reference(storage + pos / kElementsPerStoredItem, pos % kElementsPerStoredItem);
}
CUTLASS_HOST_DEVICE
reference operator[](size_type pos) {
return at(pos);
}
CUTLASS_HOST_DEVICE
const_reference operator[](size_type pos) const {
return at(pos);
}
CUTLASS_HOST_DEVICE
reference front() {
return at(0);
}
CUTLASS_HOST_DEVICE
const_reference front() const {
return at(0);
}
CUTLASS_HOST_DEVICE
reference back() {
return reference(storage + kStorageElements - 1, kElementsPerStoredItem - 1);
}
CUTLASS_HOST_DEVICE
const_reference back() const {
return const_reference(storage + kStorageElements - 1, kElementsPerStoredItem - 1);
}
CUTLASS_HOST_DEVICE
pointer data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
Storage * raw_data() {
return storage;
}
CUTLASS_HOST_DEVICE
Storage const * raw_data() const {
return storage;
}
CUTLASS_HOST_DEVICE
constexpr bool empty() const {
return !kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type max_size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
void fill(T const &value) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerStoredItem; ++i) {
reference ref(storage, i);
ref = value;
}
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kStorageElements; ++i) {
storage[i] = storage[0];
}
}
CUTLASS_HOST_DEVICE
iterator begin() {
return iterator(storage);
}
CUTLASS_HOST_DEVICE
const_iterator cbegin() const {
return const_iterator(storage);
}
CUTLASS_HOST_DEVICE
iterator end() {
return iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
const_iterator cend() const {
return const_iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
reverse_iterator rbegin() {
return reverse_iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crbegin() const {
return const_reverse_iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
reverse_iterator rend() {
return reverse_iterator(storage);
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crend() const {
return const_reverse_iterator(storage);
}
private:
/// Internal storage
Storage storage[kStorageElements];
};
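// Illustrative usage sketch (not part of the original header): the sub-byte Array
// packs several logical elements into each Storage word and exposes them through the
// proxy `reference` / `const_reference` types above. The function name
// `example_subbyte_array` is hypothetical, and int4b_t is assumed to be visible through
// the includes at the top of this file.
CUTLASS_HOST_DEVICE
void example_subbyte_array() {

  Array<int4b_t, 8> packed;      // 8 x 4-bit elements packed into one 32-bit Storage word

  packed.clear();                // zero every Storage word

  packed[0] = int4b_t(3);        // writes go through Array<...>::reference
  packed[7] = int4b_t(-2);

  int x = int(packed[7]);        // reads come back through the proxy's explicit conversions
  (void)x;

  packed.fill(int4b_t(1));       // replicate one value across all eight elements
}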
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/array_subbyte.h/0 | {
"file_path": "include/cutlass/array_subbyte.h",
"repo_id": "include",
"token_count": 5033
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing convolution problem sizes.
Conv3dProblem description:
activation (NDHWC),
filter (KTRSC),
output (NZPQK),
padding (pad_d, pad_h, pad_w),
stride (stride_d, stride_h, stride_w),
dilation (dilation_d, dilation_h, dilation_w).
Free functions to map:
Map tensor extents (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator)
Map tensor sizes (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator)
Map tensor problem sizes (Conv3d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator)
*/
#pragma once
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Problem size structure
struct Conv3dProblemSize : public Conv2dProblemSize {
//
// Type definitions
//
// 3D coordinate for padding, stride, and dilation in (d, h, w) dimensions
using Coord3D = Coord<3>;
//
// Data members
//
// Conv3d strictly problem size parameters
int D, T, Z; // input depth, filter depth, output depth
int pad_d; // padding in depth dimension
int stride_d; // stride in depth dimension
int dilation_d; // dilation in depth dimension
//
// Methods
//
public:
CUTLASS_HOST_DEVICE
Conv3dProblemSize():
Conv2dProblemSize(),
D(0), T(0), Z(0),
pad_d(0),
stride_d(1),
dilation_d(1) { }
/// Constructor for default padding, stride, dilation, and split-K
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
int N,
int D,
int H,
int W,
int C,
int Z,
int P,
int Q,
int K,
int T,
int R,
int S,
Mode mode
):
Conv2dProblemSize(N, H, W, C, P, Q, K, R, S, mode),
D(D), T(T), Z(Z),
pad_d(T / 2), stride_d(1), dilation_d(1) { }
/// Constructor
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
int N,
int D,
int H,
int W,
int C,
int K,
int T,
int R,
int S,
int Z,
int P,
int Q,
int pad_d,
int pad_h,
int pad_w,
int stride_d,
int stride_h,
int stride_w,
int dilation_d,
int dilation_h,
int dilation_w,
Mode mode,
int split_k_slices = 1,
int groups = 1
):
Conv2dProblemSize(
N, H, W, C, K, R, S, P, Q,
pad_h, pad_w,
stride_h, stride_w,
dilation_h, dilation_w,
mode, split_k_slices, groups),
D(D), T(T), Z(Z),
pad_d(pad_d), stride_d(stride_d), dilation_d(dilation_d) { }
/// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D
// sets the *user-defined* output size (Z, P, Q); the ctor initializes all data members
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
Coord3D padding, // pad_d, pad_h, pad_w
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::Tensor5DCoord output_size, // NZPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{padding[1], padding[1], padding[2], padding[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
{output_size.n(), output_size.h(), output_size.w(), output_size.c()},
mode, split_k_slices, groups),
D(input_size.d()), T(filter_size.d()), Z(output_size.d()),
pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]) { }
/// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D
// *computes* the output size and sets Z, P, and Q; the ctor initializes all data members
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
Coord3D padding, // pad_d, pad_h, pad_w
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{padding[1], padding[1], padding[2], padding[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
mode, split_k_slices, groups),
D(input_size.d()), T(filter_size.d()),
pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0])
{
// set output Z
Z = ((D + pad_d * 2 - T * dilation_d) / stride_d) + 1;
}
/// Constructs convolution problem size from cutlass Tensor5DCoord, Coord3D
// *computes* the output size and sets Z, P, and Q; the ctor initializes all data members
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
CUTLASS_STL_NAMESPACE::tuple<Coord3D, Coord3D> padding, // Coord3D {pad_d, pad_h, pad_w} & Coord3D {far pad_d, pad_h, pad_w} to calculate o/p/q
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{CUTLASS_STL_NAMESPACE::get<0>(padding)[1], CUTLASS_STL_NAMESPACE::get<1>(padding)[1],
CUTLASS_STL_NAMESPACE::get<0>(padding)[2], CUTLASS_STL_NAMESPACE::get<1>(padding)[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
mode, split_k_slices, groups),
D(input_size.d()), T(filter_size.d()),
pad_d(CUTLASS_STL_NAMESPACE::get<0>(padding)[0]), stride_d(stride[0]), dilation_d(dilation[0])
{
// set output Z
Z = ((D + pad_d + CUTLASS_STL_NAMESPACE::get<1>(padding)[0] - T * dilation_d) / stride_d) + 1;
}
/// Equality operator (ignores mode and split_k_slice)
CUTLASS_HOST_DEVICE
bool operator==(Conv3dProblemSize const &conv) const {
return (
(N == conv.N) && (D == conv.D) && (H == conv.H) && (W == conv.W) && (C == conv.C) &&
(K == conv.K) && (T == conv.T) && (R == conv.R) && (S == conv.S) &&
(Z == conv.Z) &&(P == conv.P) && (Q == conv.Q) &&
(pad_d == conv.pad_d) && (pad_h == conv.pad_h) && (pad_w == conv.pad_w) &&
(stride_d == conv.stride_d) && (stride_h == conv.stride_h) && (stride_w == conv.stride_w) &&
(dilation_d == conv.dilation_d) && (dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w)
);
}
/// Inequality operator
CUTLASS_HOST_DEVICE
bool operator!=(Conv3dProblemSize const &rhs) const {
return !(*this == rhs);
}
// Reset convolution mode in the problem
CUTLASS_HOST_DEVICE
Conv3dProblemSize reset_mode(cutlass::conv::Mode mode_) {
Conv3dProblemSize tmp(*this);
tmp.mode = mode_;
return tmp;
}
// Reset split-K slicing in the problem
CUTLASS_HOST_DEVICE
Conv3dProblemSize reset_split_k_slices(int split_k_slices_) {
Conv3dProblemSize tmp(*this);
tmp.split_k_slices = split_k_slices_;
return tmp;
}
/// Returns activation extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord activation_extent() const {
return cutlass::Tensor5DCoord ({N, D, H, W, C});
}
/// Returns filter extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord filter_extent(bool is_deconv = false) const {
return is_deconv ? cutlass::Tensor5DCoord ({C, T, R, S, K})
: cutlass::Tensor5DCoord ({K, T, R, S, C});
}
/// Returns output extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord output_extent() const {
return cutlass::Tensor5DCoord ({N, Z, P, Q, K});
}
/// Returns activation size in number of elements
CUTLASS_HOST_DEVICE
int64_t activation_size() const {
return (N * D * H * W * C);
}
/// Returns filter size in number of elements
CUTLASS_HOST_DEVICE
int64_t filter_size() const {
return (K * T * R * S * C);
}
/// Returns output size in number of elements
CUTLASS_HOST_DEVICE
int64_t output_size() const {
return (N * Z * P * Q * K);
}
/// Returns padding as Coord3D
CUTLASS_HOST_DEVICE
Coord3D padding() const {
return Coord3D ({pad_d, pad_h, pad_w});
}
/// Returns stride as MatrixCoord
CUTLASS_HOST_DEVICE
Coord3D stride() const {
return Coord3D ({stride_d, stride_h, stride_w});
}
/// Returns dilation as MatrixCoord
CUTLASS_HOST_DEVICE
Coord3D dilation() const {
return Coord3D ({dilation_d, dilation_h, dilation_w});
}
};
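// Illustrative usage sketch (not part of the original header): building a
// Conv3dProblemSize with the constructor that *computes* the output extent (Z, P, Q).
// The extents below are arbitrary example values and the function name is hypothetical.
CUTLASS_HOST_DEVICE
Conv3dProblemSize example_conv3d_problem_size() {

  cutlass::Tensor5DCoord input_size(1, 8, 16, 16, 64);   // NDHWC
  cutlass::Tensor5DCoord filter_size(32, 3, 3, 3, 64);   // KTRSC

  cutlass::Coord<3> padding({1, 1, 1});     // pad_d, pad_h, pad_w
  cutlass::Coord<3> stride({1, 1, 1});      // stride_d, stride_h, stride_w
  cutlass::Coord<3> dilation({1, 1, 1});    // dilation_d, dilation_h, dilation_w

  return Conv3dProblemSize(
    input_size, filter_size, padding, stride, dilation,
    cutlass::conv::Mode::kCrossCorrelation);
}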
////////////////////////////////////////////////////////////////////////////////////////////////////
// ImplicitGemm helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Determine the problem size of the implicit GEMM operation
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord implicit_gemm_problem_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
// Compute problem size
switch (conv_operator) {
case Operator::kFprop:
return gemm::GemmCoord(
problem_size.N * problem_size.Z * problem_size.P * problem_size.Q,
problem_size.K,
problem_size.T * problem_size.R * problem_size.S * problem_size.C
);
case Operator::kDeconv:
case Operator::kDgrad:
return gemm::GemmCoord(
problem_size.N * problem_size.D * problem_size.H * problem_size.W,
problem_size.C,
problem_size.T * problem_size.R * problem_size.S * problem_size.K
);
case Operator::kWgrad:
return gemm::GemmCoord(
problem_size.K,
problem_size.T * problem_size.R * problem_size.S * problem_size.C,
problem_size.N * problem_size.Z * problem_size.P * problem_size.Q
);
default:
break;
}
return gemm::GemmCoord();
}
/// Determine the number of gemm_k iterations for a conv3d problem using the implicit GEMM algorithm
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv3dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int iterations = 0;
int elements_per_split_k_slice = 0;
if (group_mode == GroupMode::kNone) {
switch (conv_operator) {
case Operator::kFprop:
elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kDeconv:
case Operator::kDgrad:
elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kWgrad:
elements_per_split_k_slice = (problem_size.N * problem_size.Z * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K;
break;
default:
break;
}
} else if (group_mode == GroupMode::kDepthwise) {
int channels_per_cta = threadblock_N;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.T * problem_size.R * problem_size.S *
((channels_per_cta + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
}
return iterations;
}
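// Illustrative usage sketch (not part of the original header): mapping a Conv3d problem
// to its implicit GEMM extents and counting mainloop iterations for a threadblock with
// kK = 32. The function name and the threadblock extent are arbitrary example choices.
CUTLASS_HOST_DEVICE
void example_implicit_gemm_mapping(Conv3dProblemSize const &problem_size) {

  // For Fprop: GemmM = N * Z * P * Q, GemmN = K, GemmK = T * R * S * C
  cutlass::gemm::GemmCoord gemm_size =
    implicit_gemm_problem_size(Operator::kFprop, problem_size);

  // Number of gemm_k mainloop iterations, accounting for split-K slicing
  int gemm_k_iterations =
    implicit_gemm_k_iterations(Operator::kFprop, 32, problem_size);

  (void)gemm_size;
  (void)gemm_k_iterations;
}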
////////////////////////////////////////////////////////////////////////////////
// Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output)
////////////////////////////////////////////////////////////////////////////////
/// Returns ImplicitGemm tensor A extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_a_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_extent();
case cutlass::conv::Operator::kDeconv:
case cutlass::conv::Operator::kDgrad: return problem_size.output_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.output_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor B extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_b_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_extent();
case cutlass::conv::Operator::kDeconv: return problem_size.filter_extent(true);
case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor C extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_c_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_extent();
case cutlass::conv::Operator::kDeconv:
case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor A size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_a_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_size();
case cutlass::conv::Operator::kDeconv:
case cutlass::conv::Operator::kDgrad: return problem_size.output_size();
case cutlass::conv::Operator::kWgrad: return problem_size.output_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor B size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_b_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_size();
case cutlass::conv::Operator::kDeconv:
case cutlass::conv::Operator::kDgrad: return problem_size.filter_size();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor C size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_c_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_size();
case cutlass::conv::Operator::kDeconv:
case cutlass::conv::Operator::kDgrad: return problem_size.activation_size();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_size();
default : break;
}
return 0;
}
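// Illustrative usage sketch (not part of the original header): for Fprop, implicit GEMM
// operand A maps to the activation tensor, B to the filter, and C to the output, so the
// helpers above can be used to size allocations. The function name is hypothetical.
CUTLASS_HOST_DEVICE
int64_t example_total_fprop_elements(Conv3dProblemSize const &problem_size) {
  Operator conv_op = Operator::kFprop;
  return implicit_gemm_tensor_a_size(conv_op, problem_size) +
         implicit_gemm_tensor_b_size(conv_op, problem_size) +
         implicit_gemm_tensor_c_size(conv_op, problem_size);
}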
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/conv3d_problem_size.h/0 | {
"file_path": "include/cutlass/conv/conv3d_problem_size.h",
"repo_id": "include",
"token_count": 7140
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dGroupFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::GroupMode GroupMode,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultConv2dGroupFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dGroupFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline that supports all GroupMode.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::GroupMode GroupMode,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dGroupFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
GroupMode,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA,
GroupMode
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB,
GroupMode
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
GroupMode
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dGroupFprop specialization for Analytic IteratorAlgorithm and
/// 2 stage pipeline that supports all GroupMode.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::GroupMode GroupMode,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dGroupFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
GroupMode,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA,
GroupMode
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB,
GroupMode
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
GroupMode
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dGroupFprop specialization for Optimized IteratorAlgorithm and multistage
/// pipeline that supports GroupMode::kSingleGroup.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dGroupFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
GroupMode::kSingleGroup,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
GroupMode::kSingleGroup
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dGroupFprop specialization for Optimized IteratorAlgorithm and
/// 2 stage pipeline that supports GroupMode::kSingleGroup.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dGroupFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
GroupMode::kSingleGroup,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
static_assert(platform::is_same<LayoutA, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutB, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
static_assert(platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value,
  "Current group conv only supports NHWC layout");
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
GroupMode::kSingleGroup
>;
};
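//
// Illustrative sketch (not part of the library): one way this specialization could be
// instantiated for a single-group fp16 convolution. All concrete shapes, the epilogue
// functor, the swizzle, and the trailing stride/alignment values below are assumptions
// chosen for the example; the template argument order follows the partial specialization
// above.
//
//   using GroupFpropKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
//     cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementA, LayoutA (activations)
//     cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementB, LayoutB (filters)
//     cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementC, LayoutC (output)
//     float,                                          // ElementAccumulator
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 32>,         // ThreadblockShape
//     cutlass::gemm::GemmShape<64, 64, 32>,           // WarpShape
//     cutlass::gemm::GemmShape<16, 8, 16>,            // InstructionShape
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     2,                                              // Stages (this specialization)
//     cutlass::arch::OpMultiplyAdd,
//     cutlass::conv::GroupMode::kSingleGroup,
//     cutlass::conv::IteratorAlgorithm::kOptimized,
//     cutlass::conv::StrideSupport::kStrided,
//     8, 8                                            // AlignmentA, AlignmentB
//   >::Kernel;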
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d_group_fprop.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d_group_fprop.h",
"repo_id": "include",
"token_count": 6519
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined Implicit GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ = Conv2dProblemSize ///! Convolutional operator on 2D or 3D problem
>
struct ImplicitGemmConvolutionStridedDgrad {
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static Operator const kConvolutionalOperator = ConvOperator;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename EpilogueOutputOp::ElementOutput;
/// Set output tensor C layout
using LayoutC = LayoutA;
using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using WarpMmaOperator = typename Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename ArchMmaOperator::Operator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename WarpMmaOperator::Shape;
using InstructionShape = typename ArchMmaOperator::Shape;
static int const kStages = Mma::kStages;
static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;
/// Check that iterator A and iterator B have the same convolution dimension and
// set device::ImplicitGemmConvolution::kConvDim
static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim,
"Convolution on different dimensions is not supported");
static int const kConvDim = Mma::IteratorA::kConvDim;
/// Conv dimension and problem size structure (Conv2d or Conv3d)
using ConvProblemSize = ConvProblemSize_;
static conv::GroupMode const kGroupMode = conv::GroupMode::kNone;
/// Wgrad C stride idx for implicit gemm algorithm
// Conv2d row-major matrix C (KxRSC)
// Conv3d row-major matrix C (KxTRSC)
static int const kWgradCStrideIdx =
platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorCStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradCStrideIdx : 0);
// Strided dgrad uses a specialized threadblock swizzle for functionality and performance
static_assert((platform::is_same<ThreadblockSwizzle,
threadblock::StridedDgradHorizontalThreadblockSwizzle>::value) ||
(platform::is_same<ThreadblockSwizzle,
threadblock::StridedDgradIdentityThreadblockSwizzle<1>>::value) ||
(platform::is_same<ThreadblockSwizzle,
threadblock::StridedDgradIdentityThreadblockSwizzle<4>>::value) ||
(platform::is_same<ThreadblockSwizzle,
threadblock::StridedDgradIdentityThreadblockSwizzle<8>>::value),
"Needs ThreadblockSwizzle type specialized for strided dgrad");
//
//
//
using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
LayoutC,
typename Epilogue::OutputTileIterator::Layout,
TensorRefC,
ConvOperator,
ConvProblemSize
>;
/// Argument structure
struct Arguments {
//
// Data members
//
ConvProblemSize problem_size{};
TensorRefA ref_A{};
TensorRefB ref_B{};
TensorRefC ref_C{};
TensorRefC ref_D{};
typename EpilogueOutputOp::Params output_op{};
SplitKMode split_k_mode{};
//
// Methods
//
/// Default ctor
Arguments() = default;
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size
):
problem_size(problem_size) { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size,
TensorRefA const & ref_A,
TensorRefB const & ref_B,
TensorRefC const & ref_C,
TensorRefC const & ref_D,
typename EpilogueOutputOp::Params const & output_op,
SplitKMode const & split_k_mode = SplitKMode::kSerial
):
problem_size(problem_size),
ref_A(ref_A),
ref_B(ref_B),
ref_C(ref_C),
ref_D(ref_D),
output_op(output_op),
split_k_mode(split_k_mode)
{
}
};
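//
// Illustrative sketch (an assumption, not sample code shipped with this header): how a host
// application might fill Arguments for a strided dgrad kernel. `ImplicitGemmKernel` is a
// hypothetical alias for a concrete instantiation of this struct, and `problem_size`,
// `tensor_dy`, `tensor_w`, `tensor_dx`, `alpha`, `beta` are hypothetical host-side objects.
//
//   typename ImplicitGemmKernel::Arguments args(
//     problem_size,
//     tensor_dy.device_ref(),   // ref_A: output gradient (Dy)
//     tensor_w.device_ref(),    // ref_B: filter (w)
//     tensor_dx.device_ref(),   // ref_C: source operand read by the epilogue
//     tensor_dx.device_ref(),   // ref_D: data gradient (Dx) written by the epilogue
//     {alpha, beta},            // EpilogueOutputOp::Params
//     cutlass::conv::SplitKMode::kSerial);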
/// Parameters structure
struct Params {
ConvProblemSize problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
FastDivmod stride_h_divmod{};
FastDivmod stride_w_divmod{};
int gemm_k_iterations{0};
typename Mma::IteratorA::Params iterator_A{};
typename Mma::IteratorA::Element const *ptr_A = nullptr;
typename Mma::IteratorB::Params iterator_B{};
typename Mma::IteratorB::Element const *ptr_B = nullptr;
typename Epilogue::OutputTileIterator::Params iterator_C{};
typename Epilogue::OutputTileIterator::Element *ptr_C = nullptr;
typename Epilogue::OutputTileIterator::Params iterator_D{};
typename Epilogue::OutputTileIterator::Element *ptr_D = nullptr;
typename EpilogueOutputOp::Params output_op {};
int *semaphore = nullptr;
SplitKMode split_k_mode {};
//
// Methods
//
Params() = default;
///
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
int *semaphore = nullptr
):
problem_size(args.problem_size),
stride_h_divmod(args.problem_size.stride_h),
stride_w_divmod(args.problem_size.stride_w),
iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())),
ptr_A(args.ref_A.data()),
iterator_B(args.problem_size, args.ref_B.layout()),
ptr_B(args.ref_B.data()),
iterator_C(ConvOutputIteratorParameter::layout(args.ref_C), args.problem_size, ThreadblockShape::kM),
ptr_C(args.ref_C.data()),
iterator_D(ConvOutputIteratorParameter::layout(args.ref_D), args.problem_size, ThreadblockShape::kM),
ptr_D(args.ref_D.data()),
output_op(args.output_op),
semaphore(semaphore),
split_k_mode(args.split_k_mode)
{
gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size);
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
ImplicitGemmConvolutionStridedDgrad() { }
/// Executes one ImplicitGEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {
return;
}
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Compute starting filter position for strided dgrad
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(params.problem_size,
ThreadblockShape::kM);
int filter_tile_m = (threadblock_tile_idx.m() / tile_m_per_filter);
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
// int start_r = filter_tile_m / (params.problem_size.stride_w);
// int start_s = filter_tile_m % (params.problem_size.stride_w);
int start_r, start_s;
params.stride_w_divmod(start_r, start_s, filter_tile_m);
int filter_r = start_r;
int filter_s = start_s;
if (params.problem_size.mode == Mode::kConvolution) {
filter_r = (params.problem_size.R - 1 - filter_r);
filter_s = (params.problem_size.S - 1 - filter_s);
}
// Starting h, w positions for filter position in gemm_k=0
int start_h, start_w;
strided_dgrad_starting_coords(
params.problem_size,
params.stride_h_divmod, params.stride_w_divmod,
filter_r, filter_s,
start_h, start_w);
if (start_h >= params.problem_size.H || start_w >= params.problem_size.W) {
return;
}
typename Mma::FragmentC accumulators;
accumulators.clear();
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
// Check whether this CTA contributes a valid MMA (Dy * w); otherwise the accumulators remain zero
if (start_r < params.problem_size.R && start_s < params.problem_size.S) {
// Scale gemm_k_iterations for strided dgrad
int gemm_k_iterations = (params.gemm_k_iterations / (params.problem_size.R * params.problem_size.S)
) * params.problem_size.num_gemm_k_filter_positions(start_r, start_s);
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.iterator_A,
params.problem_size,
params.ptr_A,
thread_idx,
params.stride_h_divmod, params.stride_w_divmod,
start_r, start_s,
MatrixCoord(
threadblock_tile_idx.m() * Mma::Shape::kM,
threadblock_tile_idx.k() * Mma::Shape::kK
)
);
typename Mma::IteratorB iterator_B(
params.iterator_B,
params.problem_size,
params.ptr_B,
thread_idx,
start_r, start_s,
MatrixCoord(
threadblock_tile_idx.k() * Mma::Shape::kK,
threadblock_tile_idx.n() * Mma::Shape::kN
)
);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
}
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
// Construct the semaphore.
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m();
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// Compute logical position within grid
threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// If performing a reduction via split-K, fetch the initial synchronization
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k());
}
MatrixCoord threadblock_offset(
threadblock_tile_idx.m() * Mma::Shape::kM,
threadblock_tile_idx.n() * Mma::Shape::kN
);
// Tile iterator writing to destination tensor
typename Epilogue::OutputTileIterator iterator_D(
params.iterator_D,
params.ptr_D,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
params.stride_h_divmod, params.stride_w_divmod,
start_r, start_s,
threadblock_offset
);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
if (output_op.is_source_needed())
{
// Tile iterator reading from source accumulator tensor
typename Epilogue::OutputTileIterator iterator_C(
params.iterator_C,
params.ptr_C,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
params.stride_h_divmod, params.stride_w_divmod,
start_r, start_s,
threadblock_offset);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_idx.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_idx.k());
}
// Run epilogue with addend source iterator
epilogue(output_op, iterator_D, accumulators, iterator_C);
}
else
{
// Run epilogue without addend source iterator
epilogue(output_op, iterator_D, accumulators);
}
//
// Release the semaphore
//
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_idx.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h/0 | {
"file_path": "include/cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h",
"repo_id": "include",
"token_count": 6556
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>,
bool IsDeconv_ = false
>
class Conv2dFpropFilterTileAccessIteratorOptimized{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static bool const IsDeconv = IsDeconv_;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params : Conv2dFpropFilterIteratorOptimizedParams<Layout> {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dFpropFilterIteratorOptimizedParams<Layout> const &base):
Conv2dFpropFilterIteratorOptimizedParams<Layout>(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dFpropFilterIteratorOptimizedParams<Layout>(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) {
}
};
private:
Conv2dFpropFilterIteratorOptimizedParams<Layout> const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_rs_;
int filter_c_;
int channels_per_group_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorOptimized(
Conv2dFpropFilterIteratorOptimizedParams<Layout> const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_rs_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
Index column = threadblock_offset.column() + thread_coord.strided();
channels_per_group_ = (IsDeconv ? problem_size_.K : problem_size_.C) / problem_size_.groups;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
uint32_t pred = ((column + s * ThreadMap::Delta::kStrided < (IsDeconv ? problem_size_.C : problem_size_.K)) ? 1u : 0);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
predicates_[v_idx] |= (pred << s);
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_);
}
pointer_ += (
params_.layout({filter_c_, column})
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
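// Worked example of the predicate packing above (numbers are illustrative assumptions):
// with ThreadMap::Iterations::kStrided == 4, the guard for strided iteration s is stored in
// bit s of predicates_[v] for every access-vector index v. valid() later tests
// predicates_[iteration_vector_] & (1u << iteration_strided_), and clear_mask(v) zeroes all
// bits of predicates_[v] once filter_c_ for that vector falls outside the channels of the group.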
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_rs;
// moves to the next tile
++filter_rs_;
if (filter_rs_ == params_.RS) {
filter_rs_ = 0;
next = params_.inc_next_c;
filter_c_ += params_.filter_c_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_);
}
pointer_ += next;
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
predicates_[v] = clear ? 0u : predicates_[v];
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
return (predicates_[iteration_vector_] & (1u << iteration_strided_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_k;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
auto input_channels = (IsDeconv ? problem_size.K : problem_size.C);
auto output_channels = (IsDeconv ? problem_size.C : problem_size.K);
// check alignment constraint on iterator's contiguous dimension
if ((input_channels / problem_size.groups) % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (output_channels % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (output_channels % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
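// Worked example of the alignment check above (values assumed for illustration): with
// half_t elements and AccessType::kElements == 8, a problem with C == 64 input channels and
// groups == 4 gives 64 / 4 == 16 channels per group; 16 % 8 == 0, so the check passes.
// With C == 36 and groups == 4, 36 / 4 == 9 and 9 % 8 != 0, so can_implement() returns
// Status::kErrorInvalidProblem.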
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 3640
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
// Filter position (t,r,s,c) in the contiguous dimension stays constant for each gemm_iteration_k
int filter_t_[ThreadMap::Iterations::kContiguous];
int filter_r_[ThreadMap::Iterations::kContiguous];
int filter_s_[ThreadMap::Iterations::kContiguous];
int filter_c_[ThreadMap::Iterations::kContiguous];
int offset_nzpq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dWgradActivationTileAccessIteratorAnalytic(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize t,r,s,c filter position for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int trsc_offset = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
filter_t_[c] = trsc_offset / (problem_size_.R * problem_size_.S * problem_size_.C);
int residual = trsc_offset % (problem_size_.R * problem_size_.S * problem_size_.C);
filter_r_[c] = residual / (problem_size_.S * problem_size_.C);
residual = residual % (problem_size_.S * problem_size_.C);
filter_s_[c] = residual / problem_size_.C;
filter_c_[c] = residual % problem_size_.C;
}
// initialize n, z, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] = threadblock_offset.row() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-B by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the activation tensor x that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int t = filter_t_[iteration_contiguous_];
int r = filter_r_[iteration_contiguous_];
int s = filter_s_[iteration_contiguous_];
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - t);
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int n = offset_nzpq_[iteration_strided_] / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = offset_nzpq_[iteration_strided_] % (problem_size_.Z * problem_size_.P * problem_size_.Q);
int z = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, d, h, w, filter_c_[iteration_contiguous_]);
}
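// Worked example of the mapping above (numbers chosen for illustration): with stride_h == 2,
// pad_h == 1, dilation_h == 1, output row p == 3 and filter row r == 2 map to input row
// h = 3 * 2 - 1 + 2 * 1 = 7. Out-of-range coordinates produced here are rejected by valid()
// below rather than clamped.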
/// Returns true if the current coordinate is within the activation tensor x
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.D &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradActivationTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
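// Worked example of the alignment check (illustrative assumption): for half_t elements,
// 128 / sizeof_bits<half_t>::value == 8, so the activation channel count C must be a
// multiple of 8; C == 24 is accepted while C == 20 yields Status::kErrorInvalidProblem.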
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 3334
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the addresses and predicates for loads of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
using Params = PredicatedScaleBiasVectorAccessIteratorParams;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
int problem_size_trs;
int problem_size_c;
int filter_trs_;
TensorCoord thread_offset_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_trs(problem_size.R * problem_size.S),
problem_size_c(problem_size.C),
filter_trs_(0) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv3dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_trs(problem_size.T * problem_size.R * problem_size.S),
problem_size_c(problem_size.C),
filter_trs_(0) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
/// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
/// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv3dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
/// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
/// Increments and returns a reference to self.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
/// Advances to the next filter position, moving to the next tile after the last position.
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_trs_;
if (filter_trs_ == problem_size_trs) {
filter_trs_ = 0;
add_tile_offset(TensorCoord(1, 0));
}
}
/// Post-increment: returns a copy of the iterator state prior to the increment.
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
uint32_t enabled = 0;
#if defined(_MSC_VER) || (__CUDACC_VER_MAJOR__ < 11)
enabled = threadIdx.x < kThreads * 2;
#else
asm volatile(
"{\n"
" .reg .u32 tid_reg;\n"
" .reg .pred p;\n"
" mov.u32 tid_reg, %%tid.x;\n"
" setp.lt.u32 p, tid_reg, %1;\n"
" selp.u32 %0, 1, 0, p;\n"
"}\n" : "+r"(enabled) :"n"(kThreads * 2));
#endif
return ((thread_offset_.contiguous() < problem_size_c) && enabled);
}
};
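// Worked example of the thread partitioning in the pitch-linear specialization above (values
// assumed for illustration): with half_t elements, kElementsPerAccess == 128 / 16 == 8; for
// ThreadblockShape::kContiguous == 64, kThreads == 8. Threads 0..7 read the scale vector and
// threads 8..15 read the bias vector at the same contiguous offsets, and valid() additionally
// requires threadIdx.x < 2 * kThreads.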
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Params = PredicatedScaleBiasVectorAccessIteratorParams;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv2dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv3dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv2dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv3dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Advances the underlying iterator to the next filter position.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h/0 | {
"file_path": "include/cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h",
"repo_id": "include",
"token_count": 6094
} | 29 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/atom/mma_traits_sm90.hpp"
#include "cute/atom/mma_traits_sm90_gmma.hpp"
#include "cute/atom/copy_traits_sm90.hpp"
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/gemm/collective/builders/sm90_common.inl"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/collective_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_generic.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/epilogue/fusion/callbacks.hpp"
#include "cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp"
#if defined(__CUDACC_RTC__)
#include <cuda/std/type_traits>
#else
#include <type_traits>
#endif
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::collective {
///////////////////////////////////////////////////////////////////////////////
namespace detail {
// Returns the parameterized dispatch policy for the TMA epilogue
template<class TileShapeMNK, class EpilogueTileMN, class ElementC, class ElementD, class Schedule>
constexpr auto
sm90_get_tma_dispatch_policy() {
using namespace cute;
constexpr int EpiTiles = size(shape_div(take<0,2>(TileShapeMNK{}), EpilogueTileMN{}));
constexpr int FragmentSize = size(EpilogueTileMN{}) / (detail::sm90_is_cooperative_v<Schedule> ? 256 : 128);
// 8b residuals load fast and consume little smem, so the perf cost of waiting on stores to finish outweighs the cost of extra allocation
constexpr bool ReuseSmem = (sizeof_bits_v<ElementC> == sizeof_bits_v<ElementD>) && (sizeof_bits_v<ElementD> > 8);
constexpr bool DelayTmaStore = is_void_v<ElementC>; // TMA store delay performs worse with residual loads
constexpr int StagesD = cute::min(EpiTiles, 2);
constexpr int StagesC = ReuseSmem ? cute::max(cute::min(EpiTiles, 4), StagesD+1)
: cute::min(EpiTiles, 4);
return Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmem, DelayTmaStore>{};
}
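// Worked example (all values assumed for illustration): TileShapeMNK = (128, 128, 64),
// EpilogueTileMN = (64, 32), ElementC = ElementD = half_t, cooperative schedule:
//   EpiTiles = (128/64) * (128/32) = 8, FragmentSize = (64*32)/256 = 8,
//   ReuseSmem = true (16-bit C and D), DelayTmaStore = false (C is not void),
//   StagesD = min(8, 2) = 2, StagesC = max(min(8, 4), 2 + 1) = 4,
// giving Sm90TmaWarpSpecialized<4, 2, 8, true, false>.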
// Returns the smem layout atom to be used for C or D matrix
template<class GmemStrideType, class Element, class EpilogueTile_MN>
constexpr auto
sm90_get_epilogue_smem_swizzle_layout_atom() {
using namespace cute;
// ColMajor C/D (M-major)
if constexpr (cutlass::gemm::detail::is_major<0>(GmemStrideType{})) {
return cutlass::gemm::collective::detail::ss_smem_selector<
cute::GMMA::Major::MN, Element, decltype(get<0>(EpilogueTile_MN{})), decltype(get<1>(EpilogueTile_MN{}))
>();
}
// RowMajor C/D (N-major)
else if constexpr (cutlass::gemm::detail::is_major<1>(GmemStrideType{})) {
return cutlass::gemm::collective::detail::ss_smem_selector<
cute::GMMA::Major::K , Element, decltype(get<0>(EpilogueTile_MN{})), decltype(get<1>(EpilogueTile_MN{}))
>();
}
else {
static_assert(cutlass::detail::dependent_false<GmemStrideType>, "Unsupported gmem layout.");
}
}
// Attempts to compute a reasonable epilogue tile based on block tile shape or allows the user to provide one.
template <class ElementD, class EpilogueTileType, class Schedule, class TileShape_MNK>
constexpr auto
sm90_compute_tile_shape_or_override() {
if constexpr (cute::is_same_v<EpilogueTileType, EpilogueTileAuto>) {
auto epi_tile = [&] () {
if constexpr (detail::sm90_is_cooperative_v<Schedule>) {
auto tile_m = cute::min(_128{}, size<0>(TileShape_MNK{}));
auto tile_n = cute::min(_32{}, size<1>(TileShape_MNK{}));
return make_shape(tile_m, tile_n);
}
else if constexpr (detail::sm90_is_warp_specialized_v<Schedule>) {
constexpr int N_perf = sizeof_bits_v<ElementD> == 8 ? 64 : 32;
auto tile_m = cute::min(_64{}, size<0>(TileShape_MNK{}));
auto tile_n = cute::min(Int<N_perf>{}, size<1>(TileShape_MNK{}));
return make_shape(tile_m, tile_n);
}
else {
static_assert(cutlass::detail::dependent_false<Schedule>, "Unsupported schedule.");
}
}();
return cute::transform(epi_tile, seq<0,1>{},
[] (auto epi_tiler, auto I) {
auto cta_tiler = make_layout(get<I>(TileShape_MNK{}));
// This is a multimodal CTA tiler, transform before returning
if constexpr (depth(cta_tiler) > 0) {
// This is an implicit multimodal tiler, match profile and return
if constexpr (tuple_size_v<decltype(shape(cta_tiler))> == 1) {
return make_tile(epi_tiler);
}
// This is an explicit multimodal tiler, compose out epi tiler
else {
return composition(cta_tiler, epi_tiler);
}
}
// This is a flat CTA tiler, no need for transformation
else {
return epi_tiler;
}
});
}
else if constexpr (cute::is_tuple<EpilogueTileType>::value) {
EpilogueTileType epi_tile;
constexpr int M = size<0>(shape(epi_tile));
constexpr int N = size<1>(shape(epi_tile));
static_assert(!is_layout<EpilogueTileType>::value, "EpilogueTile must be a cute::Tile or cute::Shape");
static_assert(M == 64 && detail::sm90_is_warp_specialized_v<Schedule> ||
M == 128 && detail::sm90_is_cooperative_v<Schedule>, "Unsupported tile shape");
static_assert(N % 16 == 0, "Unsupported tile shape");
return epi_tile;
}
else {
static_assert(cutlass::detail::dependent_false<EpilogueTileType>, "Invalid type for EpilogueTileType.");
}
}
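// Illustrative sketch (assumed example parameters): with EpilogueTileAuto and a flat 128x128x64
// CTA tile, the logic above selects
//   - (128, 32) for cooperative schedules,
//   - ( 64, 32) for warp-specialized schedules with 16-bit or wider ElementD,
//   - ( 64, 64) for warp-specialized schedules with 8-bit ElementD (N_perf = 64).
// A user-provided cute::Shape such as Shape<_64,_32> is validated by the static_asserts and
// passed through unchanged.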
// Selects the largest vectorized smem store atom available
template <class GmemStrideTypeD, class ElementD>
constexpr auto
sm90_get_smem_store_op_for_accumulator() {
using namespace cute;
if constexpr (sizeof(ElementD) == 2 && size<0>(GmemStrideTypeD{}) == 1) {
return SM90_U16x8_STSM_T{};
}
else if constexpr (sizeof(ElementD) == 2 && size<1>(GmemStrideTypeD{}) == 1) {
return SM90_U32x4_STSM_N{};
}
else {
// auto-vectorizing store
return AutoVectorizingCopyWithAssumedAlignment{};
}
}
// Selects the largest vectorized smem load atom available
template <class GmemStrideTypeC, class ElementC>
constexpr auto
sm90_get_smem_load_op_for_source() {
using namespace cute;
// Reuse the logic from smem store selector
using SmemStoreOp = decltype(sm90_get_smem_store_op_for_accumulator<GmemStrideTypeC, ElementC>());
if constexpr (cute::is_same_v<SmemStoreOp, SM90_U16x8_STSM_T>) {
return SM75_U16x8_LDSM_T{};
}
else if constexpr (cute::is_same_v<SmemStoreOp, SM90_U32x4_STSM_N>) {
return SM75_U32x4_LDSM_N{};
}
else {
// auto-vectorizing load
return AutoVectorizingCopyWithAssumedAlignment<128>{};
}
}
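// Illustrative sketch (assumed example types): for 16-bit C/D the two selectors above pair the
// transposed or non-transposed STSM/LDSM atoms according to gmem majorness, and otherwise fall
// back to plain vectorized copies, e.g.
//   half_t, N-major (row-major)    -> SM90_U32x4_STSM_N store, SM75_U32x4_LDSM_N load
//   half_t, M-major (column-major) -> SM90_U16x8_STSM_T store, SM75_U16x8_LDSM_T load
//   float,  either majorness       -> AutoVectorizingCopyWithAssumedAlignment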
// callbacks builder with TMA aux out
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class FusionOp,
class TileShape_MNK,
class EpilogueTile_MN,
class ElementAccumulator
>
struct CallbacksBuilder<
Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
FusionOp,
TileShape_MNK,
EpilogueTile_MN,
ElementAccumulator,
cute::enable_if_t<(FusionOp::IsAuxOutSupported ^ FusionOp::IsAuxInSupported) // only one aux tensor
&& not cute::is_subbyte_v<typename FusionOp::ElementAux>>
> {
using GmemStrideTypeAux = gemm::TagToStrideC_t<typename FusionOp::GmemLayoutTagAux>;
using SmemLayoutAtomAux = decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<
GmemStrideTypeAux, typename FusionOp::ElementAux, EpilogueTile_MN>());
using CopyOpR2S = decltype(detail::sm90_get_smem_store_op_for_accumulator<
GmemStrideTypeAux, typename FusionOp::ElementAux>());
using CopyOpS2R = decltype(detail::sm90_get_smem_load_op_for_source<
GmemStrideTypeAux, typename FusionOp::ElementAux>());
using SmemCopyOpAux = cute::conditional_t<FusionOp::IsAuxOutSupported, CopyOpR2S, CopyOpS2R>;
using Callbacks = fusion::FusionCallbacks<
Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
FusionOp, TileShape_MNK, EpilogueTile_MN,
SmemLayoutAtomAux, SmemCopyOpAux
>;
};
template <
int StagesC,
int StagesD,
int FragmentSize,
bool ReuseSmemC,
bool DelayTmaStore,
class FusionOp,
class TileShape_MNK,
class EpilogueTile_MN,
class ElementAccumulator
>
struct CallbacksBuilder<
Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
FusionOp,
TileShape_MNK,
EpilogueTile_MN,
ElementAccumulator,
cute::enable_if_t<(FusionOp::IsAuxOutSupported ^ FusionOp::IsAuxInSupported) // only one aux tensor
&& sizeof_bits_v<typename FusionOp::ElementAux> == 1>
> {
using Callbacks = fusion::FusionCallbacks<
Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>,
FusionOp, TileShape_MNK, EpilogueTile_MN,
Layout<_1,_0>, DefaultCopy // aux bit tensor doesn't use smem
>;
};
// Helper for building TMA warp-specialized collective epilogues, specialized by
// the fusion operation performed and the dispatch policy to use.
template <
class TileShape_MNK,
class EpilogueTile_MN,
class ElementAccumulator,
class ElementCompute,
class ElementC_,
class GmemLayoutTagC_,
int AlignmentC,
class ElementD_,
class GmemLayoutTagD,
int AlignmentD,
class FusionOpOrCallbacks,
class DispatchPolicy
>
struct Sm90TmaBuilderImpl {
// Passing void D disables destination store + smem allocation
using ElementD = cute::conditional_t<cute::is_void_v<ElementD_>,
fusion::get_element_aux_t<FusionOpOrCallbacks>, ElementD_>;
// Passing void C disables source load + smem allocation
using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>,ElementD,ElementC_>; // prevents void ref breakages
using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>,GmemLayoutTagD,GmemLayoutTagC_>;
using GmemStrideTypeC = cutlass::detail::TagToStrideC_t<GmemLayoutTagC>;
using GmemStrideTypeD = cutlass::detail::TagToStrideC_t<GmemLayoutTagD>;
using CopyOpS2G = cute::conditional_t<detail::is_im2col_mode<GmemLayoutTagD>,
SM90_TMA_STORE_IM2COL,
SM90_TMA_STORE
>;
using CopyOpG2S = cute::conditional_t<detail::is_im2col_mode<GmemLayoutTagC>,
SM90_TMA_LOAD_IM2COL,
SM90_TMA_LOAD
>;
// TMA builder allows for passing callbacks directly, which is either a fusion::FusionCallbacks
// instance or a direct visitor implementation, e.g. fusion::Sm90LinearCombination
using FusionCallbacks =
typename CallbacksBuilder<
DispatchPolicy,
FusionOpOrCallbacks,
TileShape_MNK,
EpilogueTile_MN,
ElementAccumulator
>::Callbacks;
using CollectiveOp = cutlass::epilogue::collective::CollectiveEpilogue<
DispatchPolicy,
TileShape_MNK,
EpilogueTile_MN,
ElementC_, // Need to pass void through to expose via GemmUniversal
GmemStrideTypeC,
ElementD_,
GmemStrideTypeD,
FusionCallbacks,
CopyOpG2S,
decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeC, ElementC, EpilogueTile_MN>()),
decltype(detail::sm90_get_smem_load_op_for_source<GmemStrideTypeC, ElementC>()),
CopyOpS2G,
decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeD, ElementD, EpilogueTile_MN>()),
decltype(detail::sm90_get_smem_store_op_for_accumulator<GmemStrideTypeD, ElementD>())
>;
};
///////////////////////////////////////////////////////////////////////////////
// Descriptor classes for defining EVT nodes
// Some of the epilogue visitor nodes require non-intuitive template arguments
// such as CopyOpS2R for the AuxLoad node. Traditionally, these are resolved by the
// builder classes. Here we provide a set of descriptor classes that resolve
// these template arguments from more intuitive types such as Stride and Layout.
// Get TileShape, EpilogueTile, DispatchPolicy, StagesC, and StagesD
template<
typename TileShape_MNK,
typename EpilogueTileType,
typename ElementC,
typename ElementD,
typename Schedule
>
struct EpilogueDescriptor {
using TileShape = TileShape_MNK;
using EpilogueTile =
decltype(
detail::sm90_compute_tile_shape_or_override<
ElementD, EpilogueTileType, Schedule, TileShape_MNK
>()
);
using DispatchPolicy =
decltype(
detail::sm90_get_tma_dispatch_policy<
TileShape_MNK, EpilogueTile,
ElementC, ElementD, Schedule
>()
);
constexpr static int StagesC = DispatchPolicy::StagesC;
constexpr static int StagesD = DispatchPolicy::StagesD;
};
// Get Stride, SmemLayout, and CopyOpS2R for AuxLoad node
template<
typename EpilogueDescriptor,
typename StrideOrLayoutTag,
typename ElementAux
>
struct AuxLoadDescriptor {
constexpr static int Stages = EpilogueDescriptor::StagesC;
using EpilogueTile = typename EpilogueDescriptor::EpilogueTile;
using Element = ElementAux;
using Stride = cutlass::detail::TagToStrideC_t<StrideOrLayoutTag>;
using SmemLayoutAtom =
decltype(
detail::sm90_get_epilogue_smem_swizzle_layout_atom<
Stride, ElementAux, typename EpilogueDescriptor::EpilogueTile
>()
);
using CopyOpS2R =
decltype(detail::sm90_get_smem_load_op_for_source<Stride, ElementAux>());
};
// Get Stride, SmemLayout, and CopyOpR2S for AuxStore node
template<
typename EpilogueDescriptor,
typename StrideOrLayoutTag,
typename ElementAux
>
struct AuxStoreDescriptor {
constexpr static int Stages = EpilogueDescriptor::StagesD;
using EpilogueTile = typename EpilogueDescriptor::EpilogueTile;
using Element = ElementAux;
using Stride = cutlass::detail::TagToStrideC_t<StrideOrLayoutTag>;
using SmemLayoutAtom =
decltype(
detail::sm90_get_epilogue_smem_swizzle_layout_atom<
Stride, ElementAux, typename EpilogueDescriptor::EpilogueTile
>()
);
using CopyOpR2S =
decltype(detail::sm90_get_smem_store_op_for_accumulator<Stride, ElementAux>());
};
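// Illustrative sketch (assumed usage, following the EVT builder pattern; concrete types below
// are examples, not requirements): the descriptors above let an aux-store visitor node be
// declared without hand-picking smem layouts or copy atoms, e.g.
//   using EpiDesc = EpilogueDescriptor<Shape<_128,_128,_64>, EpilogueTileAuto,
//                                      cutlass::half_t, cutlass::half_t,
//                                      cutlass::epilogue::TmaWarpSpecializedCooperative>;
//   using AuxDesc = AuxStoreDescriptor<EpiDesc, cutlass::layout::RowMajor, cutlass::half_t>;
// AuxDesc::Stages, AuxDesc::EpilogueTile, AuxDesc::Stride, AuxDesc::SmemLayoutAtom, and
// AuxDesc::CopyOpR2S can then be forwarded to an Sm90AuxStore node.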
template<
typename EpilogueDescriptor,
typename ElementVector
>
struct RowBroadcastDescriptor {
constexpr static int Stages = ceil_div(
EpilogueDescriptor::StagesC,
size(shape_div(take<0, 2>(typename EpilogueDescriptor::TileShape{}), typename EpilogueDescriptor::EpilogueTile{}))
) + 1;
using Element = ElementVector;
};
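// Illustrative sketch (assumed example sizes): with StagesC = 4, a 128x128 CTA tile, and a
// 64x32 epilogue tile there are 8 epilogue subtiles per CTA tile, so
//   Stages = ceil_div(4, 8) + 1 = 2
// broadcast stages are allocated for the row vector.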
} // namespace detail
///////////////////////////////////////////////////////////////////////////////
// No-smem builder
template <
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC_,
class GmemLayoutTagC_,
int AlignmentC,
class ElementD,
class GmemLayoutTagD,
int AlignmentD,
class Schedule,
FloatRoundStyle RoundStyle
>
struct CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC_,
GmemLayoutTagC_,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
Schedule,
fusion::LinearCombination<ElementD,ElementCompute,ElementC_,ElementCompute,RoundStyle>,
cute::enable_if_t<cute::is_same_v<Schedule, NoSmemWarpSpecialized> ||
cute::is_same_v<Schedule, PtrArrayNoSmemWarpSpecialized> >> {
// Passing void C disables source load
using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>,
ElementD, ElementC_>; // prevents cute breakages
using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>,
GmemLayoutTagD, GmemLayoutTagC_>;
static constexpr thread::ScaleType::Kind ScaleType = cute::is_void_v<ElementC_> ?
thread::ScaleType::OnlyAlphaScaling : thread::ScaleType::Default;
static constexpr int FragmentSize = 1;
using ThreadOp = thread::LinearCombination<
ElementD, FragmentSize, ElementAccumulator, ElementCompute,
ScaleType, RoundStyle, ElementC>;
using CollectiveOp = cute::conditional_t<
cute::is_same_v<Schedule, NoSmemWarpSpecialized>,
cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogue<
cutlass::detail::TagToStrideC_t<GmemLayoutTagC>,
cutlass::detail::TagToStrideC_t<GmemLayoutTagD>,
ThreadOp,
cutlass::gemm::EpilogueDefault>>,
// Epilogue for Ptr-Array and Grouped Gemm
cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogueArray<
cutlass::detail::TagToStrideC_t<GmemLayoutTagC>,
cutlass::detail::TagToStrideC_t<GmemLayoutTagD>,
ThreadOp,
Schedule>>
>;
};
// Tma warp-specialized builder
template <
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC,
class GmemLayoutTagC,
int AlignmentC,
class ElementD_,
class GmemLayoutTagD,
int AlignmentD,
class Schedule,
class FusionOperation
>
struct CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC,
GmemLayoutTagC,
AlignmentC,
ElementD_,
GmemLayoutTagD,
AlignmentD,
Schedule,
FusionOperation,
cute::enable_if_t<cute::is_same_v<Schedule, TmaWarpSpecialized> ||
cute::is_same_v<Schedule, TmaWarpSpecializedCooperative> >> {
private:
using ElementD = cute::conditional_t<cute::is_void_v<ElementD_>,
fusion::get_element_aux_t<FusionOperation>, ElementD_>;
using EpilogueTile_MN =
decltype(detail::sm90_compute_tile_shape_or_override<ElementD, EpilogueTileType, Schedule, TileShape_MNK>());
using DispatchPolicy =
decltype(detail::sm90_get_tma_dispatch_policy<TileShape_MNK,EpilogueTile_MN,ElementC,ElementD,Schedule>());
public:
using CollectiveOp =
typename detail::Sm90TmaBuilderImpl<
TileShape_MNK,
EpilogueTile_MN,
ElementAccumulator,
ElementCompute,
ElementC,
GmemLayoutTagC,
AlignmentC,
ElementD_,
GmemLayoutTagD,
AlignmentD,
FusionOperation,
DispatchPolicy
>::CollectiveOp;
};
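// Illustrative usage sketch (mirrors the pattern used by the CUTLASS examples; the concrete
// shapes, element types, and alignments below are assumptions, not requirements):
//   using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
//       cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
//       Shape<_128,_128,_64>, Shape<_1,_1,_1>,
//       cutlass::epilogue::collective::EpilogueTileAuto,
//       float, float,                                   // ElementAccumulator, ElementCompute
//       cutlass::half_t, cutlass::layout::RowMajor, 8,  // C
//       cutlass::half_t, cutlass::layout::RowMajor, 8,  // D
//       cutlass::epilogue::TmaWarpSpecializedCooperative
//     >::CollectiveOp;
// Passing a fusion::* operation as the trailing template argument selects a custom epilogue fusion.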
// Auto builder
template <
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC,
class GmemLayoutTagC,
int AlignmentC,
class ElementD,
class GmemLayoutTagD,
int AlignmentD,
class FusionOperation
>
struct CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC,
GmemLayoutTagC,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
EpilogueScheduleAuto,
FusionOperation,
void> {
private:
static_assert(cute::is_same_v<FusionOperation, fusion::LinearCombination<ElementD,ElementCompute,ElementC,ElementCompute>>,
"Auto schedule doesn't support fusion. Use one of the TmaWarpSpecialized schedules instead.");
// Pick No-Smem epilogue as the Auto Epilogue Schedule (Auto schedules do not guarantee best performance)
// since TMA epilogues are not compatible with non-TMA non-WS mainloops
using EpilogueSchedule = NoSmemWarpSpecialized;
using _CollectiveBuilder = CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC,
GmemLayoutTagC,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
EpilogueSchedule,
FusionOperation
>;
public:
using CollectiveOp = typename _CollectiveBuilder::CollectiveOp;
};
// DEPRECATED Tma warp-specialized builder for elementwise fusion
template <
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC,
class GmemLayoutTagC,
int AlignmentC,
class ElementD,
class GmemLayoutTagD,
int AlignmentD,
class Schedule,
class UnusedFusionOp
>
struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombEltAct instead")]]
CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC,
GmemLayoutTagC,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
Schedule,
UnusedFusionOp,
cute::enable_if_t<cute::is_base_of_v<TmaWarpSpecializedElementwiseBase, Schedule> ||
cute::is_base_of_v<TmaWarpSpecializedCooperativeElementwiseBase, Schedule> >> {
private:
using FusionOp =
fusion::LinCombEltAct<Schedule::template ActivationFunctor, ElementD, ElementCompute, ElementC, ElementCompute, Schedule::Round>;
using ImplSchedule =
cute::conditional_t<cute::is_base_of_v<TmaWarpSpecializedElementwiseBase, Schedule>,
TmaWarpSpecialized, TmaWarpSpecializedCooperative>;
public:
using CollectiveOp =
typename CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC,
GmemLayoutTagC,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
ImplSchedule,
FusionOp
>::CollectiveOp;
};
// DEPRECATED Tma warp-specialized builder for bias + elementwise fusion
template <
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC_,
class GmemLayoutTagC_,
int AlignmentC,
class ElementD,
class GmemLayoutTagD,
int AlignmentD,
class Schedule,
class UnusedFusionOp
>
struct [[deprecated("Use TmaWarpSpecialized with fusion::LinCombPerRowBiasEltAct or fusion::LinCombPerRowBiasEltActAux instead")]]
CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC_,
GmemLayoutTagC_,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
Schedule,
UnusedFusionOp,
cute::enable_if_t<cute::is_base_of_v<TmaWarpSpecializedBiasElementwiseBase, Schedule> ||
cute::is_base_of_v<TmaWarpSpecializedCooperativeBiasElementwiseBase, Schedule> >> {
private:
using EpilogueTile_MN = decltype(detail::sm90_compute_tile_shape_or_override<
ElementD, EpilogueTileType, Schedule, TileShape_MNK>());
// MSVC doesn't seem to be able to deduce DispatchPolicy correctly if it's
// defined as decltype of a detail::sm90_get_tma_dispatch_policy call.
// Instead, we paste in the contents of that function. A natural refactoring
// would be to create a type alias in the detail namespace.
using DispatchPolicy = Sm90TmaWarpSpecialized<
/* StagesC = */ size(shape_div(take<0, 2>(TileShape_MNK{}), EpilogueTile_MN{})),
/* StagesD = */ 2,
/* FragmentSize = */ size(EpilogueTile_MN{}) / (detail::sm90_is_cooperative_v<Schedule> ? 256 : 128),
/* ReuseSmemC = */ sizeof_bits_v<ElementC_> == sizeof_bits_v<ElementD>,
false
>;
using GmemStrideTypeAux = gemm::TagToStrideC_t<GmemLayoutTagD>;
using SmemLayoutAtomAux = decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<
GmemStrideTypeAux, typename Schedule::ElementT, EpilogueTile_MN>());
using SmemCopyOpAux = decltype(detail::sm90_get_smem_store_op_for_accumulator<
GmemStrideTypeAux, typename Schedule::ElementT>());
using FusionOperationAux = fusion::LinCombPerRowBiasEltActAux<
GmemLayoutTagD, Schedule::template ActivationFunctor, ElementD, ElementCompute,
typename Schedule::ElementT, typename Schedule::ElementBias, ElementC_, ElementCompute
>;
using FusionCallbacksAux = fusion::FusionCallbacks<
DispatchPolicy, FusionOperationAux, TileShape_MNK, EpilogueTile_MN, SmemLayoutAtomAux, SmemCopyOpAux
>;
using FusionOperationNoAux = fusion::LinCombPerRowBiasEltAct<
Schedule::template ActivationFunctor, ElementD, ElementCompute,
typename Schedule::ElementBias, ElementC_, ElementCompute
>;
using FusionCallbacksNoAux = fusion::FusionCallbacks<
DispatchPolicy, FusionOperationNoAux, TileShape_MNK, EpilogueTile_MN
>;
using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>,ElementD,ElementC_>; // prevents void ref breakages
using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>,GmemLayoutTagD,GmemLayoutTagC_>;
using GmemStrideTypeC = gemm::TagToStrideC_t<GmemLayoutTagC>;
using GmemStrideTypeD = gemm::TagToStrideC_t<GmemLayoutTagD>;
public:
using CollectiveOp = cutlass::epilogue::collective::Sm90EpilogueTmaWarpSpecializedBiasElementwise<
DispatchPolicy::StagesC,
DispatchPolicy::StagesD,
DispatchPolicy::FragmentSize,
TileShape_MNK,
EpilogueTile_MN,
ElementC_, // Need to pass void through to expose via GemmUniversal
GmemStrideTypeC,
ElementD,
GmemStrideTypeD,
cute::conditional_t<Schedule::StoreT, FusionCallbacksAux, FusionCallbacksNoAux>,
SM90_TMA_LOAD,
decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeC, ElementC, EpilogueTile_MN>()),
decltype(detail::sm90_get_smem_load_op_for_source<GmemStrideTypeC, ElementC>()),
SM90_TMA_STORE,
decltype(detail::sm90_get_epilogue_smem_swizzle_layout_atom<GmemStrideTypeD, ElementD, EpilogueTile_MN>()),
decltype(detail::sm90_get_smem_store_op_for_accumulator<GmemStrideTypeD, ElementD>())
>;
};
// The CollectiveBuilder with the transposed epilogue below is used for SM90 GMMA RS TT kernels:
// by swapping the input matrices of an NNN kernel and transposing its output,
// we obtain a TTN kernel.
template <
class TileShape_MNK,
class ClusterShape_MNK,
class EpilogueTileType,
class ElementAccumulator,
class ElementCompute,
class ElementC_,
class GmemLayoutTagC_,
int AlignmentC,
class ElementD,
class GmemLayoutTagD,
int AlignmentD,
FloatRoundStyle RoundStyle
>
struct CollectiveBuilder<
arch::Sm90,
arch::OpClassTensorOp,
TileShape_MNK,
ClusterShape_MNK,
EpilogueTileType,
ElementAccumulator,
ElementCompute,
ElementC_,
GmemLayoutTagC_,
AlignmentC,
ElementD,
GmemLayoutTagD,
AlignmentD,
cutlass::gemm::EpilogueTransposed,
fusion::LinearCombination<ElementD,ElementCompute,ElementC_,ElementCompute,RoundStyle>,
void> {
// Passing void C disables source load
using ElementC = cute::conditional_t<cute::is_void_v<ElementC_>,
ElementD, ElementC_>; // prevents cute breakages
using GmemLayoutTagC = cute::conditional_t<cute::is_void_v<ElementC_>,
GmemLayoutTagD, GmemLayoutTagC_>;
static constexpr thread::ScaleType::Kind ScaleType = cute::is_void_v<ElementC_> ?
thread::ScaleType::OnlyAlphaScaling : thread::ScaleType::Default;
static constexpr int FragmentSize = 1;
using ThreadOp = thread::LinearCombination<
ElementD, FragmentSize, ElementAccumulator, ElementCompute,
ScaleType, RoundStyle, ElementC>;
using CollectiveOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogue<
cutlass::detail::TagToStrideC_t<GmemLayoutTagC>,
cutlass::detail::TagToStrideC_t<GmemLayoutTagD>,
ThreadOp,
cutlass::gemm::EpilogueTransposed>
>;
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::collective
// End of file: include/cutlass/epilogue/collective/builders/sm90_builder.inl
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree store operations for the sm90 TMA warp-specialized (ws) epilogue
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cute/tensor.hpp"
#include "sm90_visitor_tma_warpspecialized.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
using namespace cute;
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Elementwise Store Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class EpilogueTile,
class Element,
FloatRoundStyle RoundStyle,
class StrideMNL,
class SmemLayoutAtom,
class CopyOpR2S,
int Alignment = 128 / sizeof_bits_v<Element>,
bool EnableNullptr = true // Noop on nullptr params
>
struct Sm90AuxStore {
using ElementAux = Element;
static_assert(Alignment * sizeof_bits_v<Element> % 128 == 0, "sub-16B alignment not supported yet");
constexpr static bool is_m_major = epilogue::collective::detail::is_m_major<StrideMNL>();
// Find the max contiguous layout usable by TMA (if EpilogueTile is a non-compact tiler)
using SmemShapeTma = decltype(make_shape(
max_common_vector(make_layout(get<0>(EpilogueTile{})),make_layout(get<0>(EpilogueTile{}))),
max_common_vector(make_layout(get<1>(EpilogueTile{})),make_layout(get<1>(EpilogueTile{})))));
using SmemLayoutTma = decltype(tile_to_shape(
SmemLayoutAtom{}, SmemShapeTma{},
cute::conditional_t<is_m_major, Step<_2,_1>, Step<_1,_2>>{} ));
using SmemLayout = decltype(tile_to_shape(
SmemLayoutTma{},
make_shape(size<0>(shape(EpilogueTile{})), size<1>(shape(EpilogueTile{})), Int<Stages>{}),
cute::conditional_t<is_m_major, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
struct SharedStorage {
alignas(cutlass::detail::alignment_for_swizzle(SmemLayout{}))
array_aligned<Element, size(SmemLayout{})> smem_aux;
};
struct Arguments {
Element* ptr_aux = nullptr;
StrideMNL dAux = {};
};
struct Params {
using TMA_Aux = decltype(make_tma_copy(
SM90_TMA_STORE{},
make_tensor(static_cast<Element*>(nullptr), repeat_like(StrideMNL{}, int32_t(0)), StrideMNL{}),
SmemLayoutTma{}));
TMA_Aux tma_store_aux;
bool is_nullptr = false;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
// Optionally append 1s until problem shape is rank-4, in case it is only rank-3 (MNK)
auto problem_shape_mnkl = append<4>(problem_shape, 1);
auto [M, N, K, L] = problem_shape_mnkl;
bool is_nullptr = false;
if constexpr (EnableNullptr) {
is_nullptr = args.ptr_aux == nullptr;
}
typename Params::TMA_Aux tma_store_aux;
if (not is_nullptr) {
Tensor tensor_aux = make_tensor(args.ptr_aux, make_layout(make_shape(M,N,L), args.dAux));
tma_store_aux = make_tma_copy(SM90_TMA_STORE{}, tensor_aux, SmemLayoutTma{});
}
return {tma_store_aux, is_nullptr};
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return cutlass::Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Sm90AuxStore() { }
CUTLASS_HOST_DEVICE
Sm90AuxStore(Params const& params, SharedStorage const& shared_storage)
: params_ptr(¶ms),
smem_aux(const_cast<Element*>(shared_storage.smem_aux.data())) { }
Params const* params_ptr;
Element* smem_aux;
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template <
class RTensor,
class TiledR2S,
class STensorR2S,
class STensorS2G,
class GTensorS2G
>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(
RTensor&& tC_rAux,
TiledR2S tiled_r2s,
STensorR2S&& tRS_sAux,
STensorS2G&& bSG_sAux,
GTensorS2G&& bSG_gAux,
Params const* params_ptr)
: tiled_r2s(tiled_r2s),
tC_rAux(cute::forward<RTensor>(tC_rAux)),
tRS_sAux(cute::forward<STensorR2S>(tRS_sAux)),
bSG_sAux(cute::forward<STensorS2G>(bSG_sAux)),
bSG_gAux(cute::forward<GTensorS2G>(bSG_gAux)),
params_ptr(params_ptr) {}
TiledR2S tiled_r2s;
RTensor tC_rAux; // (CPY,CPY_M,CPY_N)
STensorR2S tRS_sAux; // (R2S,R2S_M,R2S_N,PIPE)
STensorS2G bSG_sAux; // (S2G,S2G_M,S2G_N,PIPE)
GTensorS2G bSG_gAux; // (S2G,S2G_M,S2G_N,EPI_M,EPI_N)
Params const* params_ptr;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
using ConvertInput = NumericArrayConverter<Element, ElementInput, FragmentSize, RoundStyle>;
ConvertInput convert_input{};
Tensor tC_rAux_frg = recast<Array<Element, FragmentSize>>(coalesce(tC_rAux)); // (EPI_V)
tC_rAux_frg(epi_v) = convert_input(frg_input);
return frg_input;
}
CUTLASS_DEVICE void
postreduce(int epi_m, int epi_n, int store_iteration, bool issue_smem_store) {
if constexpr (EnableNullptr) {
if (params_ptr->is_nullptr) {
return;
}
}
using RLayoutR2S = decltype(cute::layout(TiledR2S{}.get_slice(0).retile_S(RTensor{})));
Tensor tRS_rAux = make_tensor(tC_rAux.data(), RLayoutR2S{}); // (R2S,R2S_M,R2S_N)
if (issue_smem_store) {
int store_pipe_index = store_iteration % Stages;
copy(tiled_r2s, tRS_rAux, tRS_sAux(_,_,_,store_pipe_index));
}
}
CUTLASS_DEVICE void
tma_store(int epi_m, int epi_n, int store_iteration, bool issue_tma_store) {
if constexpr (EnableNullptr) {
if (params_ptr->is_nullptr) {
return;
}
}
if (issue_tma_store) {
// Issue the TMA store
int store_pipe_index = store_iteration % Stages;
copy(params_ptr->tma_store_aux, bSG_sAux(_,_,_,store_pipe_index), bSG_gAux(_,_,_,epi_m,epi_n));
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
Tensor mAux = params_ptr->tma_store_aux.get_tma_tensor(make_shape(M,N,L)); // (M,N,L)
Tensor gAux = local_tile(mAux, take<0,2>(args.tile_shape_mnk), make_coord(m,n,l)); // (CTA_M,CTA_N)
Tensor tC_gAux = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gAux, args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tC_rAux = make_tensor<Element>(take<0,3>(shape(tC_gAux))); // (CPY,CPY_M,CPY_N)
Tensor sAux_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(smem_aux), SmemLayout{})); // (EPI_TILE_M,EPI_TILE_N,PIPE)
Tensor gAux_epi = flat_divide(gAux, args.epi_tile); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
auto tiled_r2s = conditional_return<ReferenceSrc>(
make_tiled_copy_S(Copy_Atom<CopyOpR2S,Element>{}, args.tiled_copy),
make_tiled_copy_D(Copy_Atom<CopyOpR2S,Element>{}, args.tiled_copy)
);
auto tRS_sAux = tiled_r2s.get_slice(args.thread_idx).partition_D(sAux_epi); // (R2S,R2S_M,R2S_N,PIPE)
ThrCopy thrblk_s2g = params_ptr->tma_store_aux.get_slice(_0{});
Tensor bSG_sAux = thrblk_s2g.partition_S(sAux_epi); // (TMA,TMA_M,TMA_N,PIPE)
Tensor bSG_gAux = thrblk_s2g.partition_D(gAux_epi); // (TMA,TMA_M,TMA_N,EPI_M,EPI_N)
return ConsumerStoreCallbacks<decltype(tC_rAux), decltype(tiled_r2s), decltype(tRS_sAux), decltype(bSG_sAux), decltype(bSG_gAux)>(
cute::move(tC_rAux),
tiled_r2s,
cute::move(tRS_sAux),
cute::move(bSG_sAux),
cute::move(bSG_gAux),
params_ptr);
}
};
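// Illustrative host-side sketch (EpilogueTile, SmemLayoutAtom, ldAux, and batch_stride are
// assumed placeholder names, typically resolved via AuxStoreDescriptor): only a pointer and an
// (M,N,L) stride are supplied at runtime; a nullptr aux pointer makes the store a no-op when
// EnableNullptr is true.
//   using AuxStore = Sm90AuxStore<2, EpilogueTile, cutlass::half_t,
//                                 FloatRoundStyle::round_to_nearest, Stride<int64_t,_1,int64_t>,
//                                 SmemLayoutAtom, SM90_U32x4_STSM_N>;
//   typename AuxStore::Arguments aux_args{ptr_aux, {ldAux, _1{}, batch_stride}};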
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reduction Store Operations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Scalar reduction
template <
template <class> class RegReduceFn,
template <class> class GmemReduceFn,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class StrideMNL = Stride<_0,_0,_0>,
bool EnableNullptr = true // Noop on nullptr params
>
struct Sm90ScalarReduction {
private:
static_assert(
(cute::is_same_v<StrideMNL, Stride<_0,_0, _0>>) || // scalar reduction, e.g. tensor max element
(cute::is_same_v<StrideMNL, Stride<_0,_0, _1>>) || // batched scalar reduction, e.g. per-batch max element
(cute::is_same_v<StrideMNL, Stride<_0,_0,int>>));
static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value;
static_assert(IsAtomic, "non-atomic scalar reduction not supported yet");
public:
struct SharedStorage { };
struct Arguments {
ElementOutput* ptr_scalar = nullptr;
ElementCompute reduction_identity = ElementCompute(0);
StrideMNL dScalar = {};
};
using Params = Arguments;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
return args;
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return 0;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
if constexpr (IsAtomic) {
auto [M, N, K, L] = problem_shape;
Layout mScalar_layout = make_layout(make_shape(M,N,L), args.dScalar);
if (args.ptr_scalar != nullptr) {
return fill_workspace(args.ptr_scalar, ElementOutput(args.reduction_identity), cosize(mScalar_layout), stream, cuda_adapter);
}
}
return cutlass::Status::kSuccess;
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90ScalarReduction() { }
CUTLASS_HOST_DEVICE
Sm90ScalarReduction(Params const& params, SharedStorage const& shared_storage)
: params(params) { }
Params const params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template<class CTensor, class ResidueMN>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(
int l_coord,
CTensor tCcScalar,
ResidueMN residue_mn,
Params const& params)
: scalar(params.reduction_identity),
l_coord(l_coord),
tCcScalar(tCcScalar),
residue_mn(residue_mn),
params(params) {}
ElementCompute scalar;
int l_coord;
CTensor tCcScalar; // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
ResidueMN residue_mn;
Params params;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
if constexpr (EnableNullptr) {
if (params.ptr_scalar == nullptr) {
return frg_input;
}
}
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ReduceInput = RegReduceFn<ElementCompute>;
ConvertInput convert_input{};
ReduceInput reduce_input{};
Array frg_I = convert_input(frg_input);
Tensor tCcScalar_mn = tCcScalar(_,_,_,epi_m,epi_n);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
if (elem_less(tCcScalar_mn(epi_v * FragmentSize + i), residue_mn)) {
scalar = reduce_input(scalar, frg_I[i]);
}
}
return frg_input;
}
CUTLASS_DEVICE void
end() {
if constexpr (EnableNullptr) {
if (params.ptr_scalar == nullptr) {
return;
}
}
using ConvertI = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
using ReduceInput = GmemReduceFn<ElementOutput>;
ConvertI convert_I{};
ReduceInput reduce_input{};
ElementOutput* ptr_scalar = params.ptr_scalar + l_coord * get<2>(params.dScalar);
reduce_input(ptr_scalar, convert_I(scalar));
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
return ConsumerStoreCallbacks<decltype(args.tCcD), decltype(args.residue_mn)>(
get<3>(args.tile_coord_mnkl), args.tCcD, args.residue_mn, params);
}
};
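// Illustrative sketch (functor choices are assumptions; only the atomic path is supported here):
// a per-batch scalar maximum over the epilogue output could be expressed as
//   Sm90ScalarReduction<cutlass::maximum, cutlass::atomic_maximum,
//                       float, float, FloatRoundStyle::round_to_nearest, Stride<_0,_0,_1>>;
// with ptr_scalar pointing to one float per batch, pre-filled with reduction_identity by
// initialize_workspace.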
/////////////////////////////////////////////////////////////////////////////////////////////////
// Row vector reduction
template <
template <class> class RegReduceFn,
template <class> class ShuffleReduceFn,
template <class> class GmemReduceFn,
int Stages,
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class StrideMNL = Stride<_0,_1,_0>,
int Alignment = 128 / sizeof_bits_v<ElementOutput>,
bool EnableNullptr = true, // Noop on nullptr params
// If this is false, ptr_row is assumed to point to a compact n-major (ceil_div(M,CTA_M), round_nearest(N,CTA_N), L)
// tensor of ElementCompute. It is the user's responsibility to reduce this to a (N, L) tensor of ElementOutput
bool FinalReduction = true,
// False means skip OOB predication if OOB inputs are known to be the reduction identity
bool VisitCheckOOB = true
>
struct Sm90RowReduction {
private:
static_assert(Stages == 0, "Smem usage not supported yet");
static_assert(Alignment * sizeof_bits_v<ElementOutput> % 128 == 0, "sub-16B alignment not supported yet");
static_assert(
(cute::is_same_v<StrideMNL, Stride<_0,_1, _0>>) || // row vector reduction, e.g. per-col sum over all batches
(cute::is_same_v<StrideMNL, Stride<_0,_1,int>>)); // batched row vector reduction, e.g. per-col sum per batch
static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value;
static_assert(not (IsAtomic && not FinalReduction), "atomic reduction must be final");
public:
struct SharedStorage { };
struct Arguments {
void* ptr_row = nullptr; // ElementOutput* if FinalReduction, else ElementCompute*
ElementCompute reduction_identity = 0;
StrideMNL dRow = {};
};
struct Params {
void* ptr_row = nullptr;
ElementCompute reduction_identity = 0;
StrideMNL dRow = {};
ElementCompute* reduction_buffer = nullptr;
int* tile_counters = nullptr;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
ElementCompute* reduction_buffer;
int* tile_counters = nullptr;
if constexpr (IsAtomic) {
reduction_buffer = nullptr;
}
else if constexpr (not FinalReduction) {
reduction_buffer = reinterpret_cast<ElementCompute*>(args.ptr_row);
}
else {
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(size<>(M), size<>(N), L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, sizeof(int));
reduction_buffer = reinterpret_cast<ElementCompute*>(workspace);
tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
}
return {
args.ptr_row,
args.reduction_identity,
args.dRow,
reduction_buffer,
tile_counters
};
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
if constexpr (IsAtomic || not FinalReduction) {
return 0;
}
size_t workspace_size = 0;
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
// Increment by size of reduction buffer
workspace_size += product(ceil_div(make_shape(size<>(M),size<>(N),L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute);
// Align and increment by size of tile counters
workspace_size = round_nearest(workspace_size, sizeof(int));
workspace_size += cute::ceil_div(size<>(N), tile_N) * sizeof(int);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
if constexpr (IsAtomic) {
auto [M, N, K, L] = problem_shape;
Layout mRow_layout = make_layout(make_shape(M,N,L), args.dRow);
if (args.ptr_row != nullptr) {
return fill_workspace(args.ptr_row, ElementOutput(args.reduction_identity), cosize(mRow_layout), stream, cuda_adapter);
}
return Status::kSuccess;
}
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(size<>(M),size<>(N),L), make_shape(tile_M, tile_N))) * tile_N * sizeof(ElementCompute);
int* tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
size_t tile_counters_size = cute::ceil_div(size<>(N), tile_N) * sizeof(int);
return zero_workspace(tile_counters, tile_counters_size, stream);
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90RowReduction() { }
CUTLASS_HOST_DEVICE
Sm90RowReduction(Params const& params, SharedStorage const& shared_storage)
: params(params) { }
Params params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template<class ArgsTuple>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(ArgsTuple&& args_tuple, Params const& params)
: args_tuple(cute::forward<ArgsTuple>(args_tuple)),
params(params) {}
ArgsTuple args_tuple;
Params const& params;
bool do_final_reduction = false;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
if constexpr (EnableNullptr) {
if (params.ptr_row == nullptr) {
return frg_input;
}
}
auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple;
Tensor tCrRow_mn = tCrRow(_,_,_,epi_m,epi_n);
Tensor tCcRow_mn = tCcRow(_,_,_,epi_m,epi_n);
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ReduceInput = RegReduceFn<ElementCompute>;
ConvertInput convert_input{};
ReduceInput reduce_input{};
Array frg_I = convert_input(frg_input);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
if constexpr (VisitCheckOOB) {
if (elem_less(tCcRow_mn(epi_v * FragmentSize + i), residue_mn)) {
ElementCompute& tCrRow_vmn = tCrRow_mn(epi_v * FragmentSize + i);
tCrRow_vmn = reduce_input(tCrRow_vmn, frg_I[i]);
}
}
else {
ElementCompute& tCrRow_vmn = tCrRow_mn(epi_v * FragmentSize + i);
tCrRow_vmn = reduce_input(tCrRow_vmn, frg_I[i]);
}
}
return frg_input;
}
template <class STensor, class SyncFn>
CUTLASS_DEVICE void
reduce(STensor&& smem_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration) {
if (not is_last_iteration) {
return;
}
auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple;
auto [m, n, k, l] = tile_coord_mnkl;
constexpr bool ReferenceSrc = decltype(ref_src)::value;
if constexpr (EnableNullptr) {
if (params.ptr_row == nullptr) {
return;
}
}
// fully OOB CTA in partially OOB cluster
if (not elem_less(cRow(_0{},_0{}), residue_mn)) {
return;
}
//
// 1. Warp shuffle reduction
//
using FragmentShuffle = Array<ElementCompute, sizeof(uint64_t) / sizeof(ElementCompute)>;
using ReduceShuffle = ShuffleReduceFn<FragmentShuffle>;
ReduceShuffle reduce_shuffle{};
Tensor tCrRow_frg = recast<FragmentShuffle>(filter(tCrRow));
CUTLASS_PRAGMA_UNROLL
for (int reduction_rows = size<0>(lane_layout_MN) / 2; reduction_rows > 0; reduction_rows /= 2) {
CUTLASS_PRAGMA_UNROLL
for (int frg_idx = 0; frg_idx < size(tCrRow_frg); ++frg_idx) {
uint64_t frg_shfl = reinterpret_cast<uint64_t&>(tCrRow_frg(frg_idx));
frg_shfl = __shfl_down_sync(0xFFFFFFFF, frg_shfl, lane_layout_MN(reduction_rows, _0{}));
tCrRow_frg(frg_idx) = reduce_shuffle(tCrRow_frg(frg_idx), reinterpret_cast<FragmentShuffle&>(frg_shfl));
}
}
bool is_reduced_lane = get<0>(lane_mn) == 0;
//
// 2. Atomic reduction
//
if constexpr (IsAtomic) {
// Filter so we don't issue redundant copies over stride-0 modes
Tensor tCrRow_flt = filter_zeros(tCrRow);
Tensor tCcRow_flt = make_tensor(tCcRow.data(), make_layout(tCrRow_flt.shape(), tCcRow.stride()));
Tensor tCgRow = sm90_partition_for_epilogue<ReferenceSrc>(gRow_l(_,_,l), epi_tile, tiled_copy, thread_idx);
Tensor tCgRow_flt = filter_zeros(tCgRow);
// NOTE: atomic reduction is performed in the output type
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
using ReduceOutput = GmemReduceFn<ElementOutput>;
ConvertOutput convert_output{};
ReduceOutput reduce_output{};
if (is_reduced_lane) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(tCrRow_flt); ++i) {
if (elem_less(tCcRow_flt(i), residue_mn)) {
reduce_output(&tCgRow_flt(i), convert_output(tCrRow_flt(i)));
}
}
}
sync_fn();
}
//
// 2. One warp in M, skip threadblock smem reduction
//
else if constexpr (decltype(size<0>(warp_layout_MN))::value <= 1) {
// Dump warp reduction to gmem workspace
using ElementGmem = cute::conditional_t<FinalReduction, ElementCompute volatile, ElementCompute>;
Tensor tCgBuf = sm90_partition_for_epilogue<ReferenceSrc>(gBuf_ml(_,_,m,l), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrRow), recast<ElementGmem>(filter(tCgBuf)));
}
sync_fn();
}
//
// 2. Multiple warps in M, do threadblock smem reduction
//
else {
Tensor sBuf = make_tensor(make_smem_ptr<ElementCompute>(raw_pointer_cast(smem_buffer.data())), sBuf_layout);
static_assert(decltype(cosize(sBuf.layout()))::value * sizeof(ElementCompute) <=
decltype(cosize(smem_buffer.layout()))::value * sizeof(typename remove_cvref_t<STensor>::value_type),
"smem reduction buffer not large enough, use a larger epilogue tile");
// Dump warp reduction to smem workspace
Tensor tCsBuf = sm90_partition_for_epilogue<ReferenceSrc>(sBuf(_,_,get<0>(warp_mn)), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrRow), filter(tCsBuf));
}
sync_fn();
constexpr int SmemFragSize = cute::max(size_t{1}, sizeof(uint32_t) / sizeof(ElementCompute));
using FragmentSmem = Array<ElementCompute, SmemFragSize>;
using VectorSmem = uint_bit_t<sizeof_bits_v<FragmentSmem>>;
using ReduceSmem = GmemReduceFn<FragmentSmem>;
ReduceSmem reduce_smem{};
Tensor sBuf_frg = recast<FragmentSmem>(filter_zeros(sBuf));
Tensor sBuf_vec = recast<VectorSmem>(filter_zeros(sBuf));
constexpr int FragsPerRow = decltype(size<1>(sBuf_frg))::value;
// Do the threadblock smem reduction
CUTLASS_PRAGMA_UNROLL
for (int reduction_rows = size<0>(warp_layout_MN) / 2; reduction_rows > 1; reduction_rows /= 2) {
int FragsPerReduction = reduction_rows * FragsPerRow;
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerReduction; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerReduction));
sBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
// Do final smem reduction and dump to gmem workspace
using VectorGmem = cute::conditional_t<FinalReduction, VectorSmem volatile, VectorSmem>;
Tensor gBuf_vec = recast<VectorGmem>(filter(gBuf_ml(_,_,m,l)));
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerRow; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerRow));
gBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
//
// 3. Increment atomic counters to signal final gmem reduction
//
if constexpr (not IsAtomic && FinalReduction) {
// Ensure gmem writes are visible to other threads before incrementing counter
__threadfence();
sync_fn();
// Collective thread 0 increments atomic tile counter and copies value to smem
int* prev_tile_count = reinterpret_cast<int*>(raw_pointer_cast(smem_buffer.data()));
if (thread_idx == 0) {
*prev_tile_count = atomicAdd(¶ms.tile_counters[n], 1);
}
sync_fn();
// Broadcast tile count to other threads in CTA and determine final reduction status
do_final_reduction = *prev_tile_count == size<2>(gBuf_ml) * size<3>(gBuf_ml) - 1;
sync_fn();
}
}
CUTLASS_DEVICE void
end() {
//
// 4. Do final gmem reduction if necessary
//
if constexpr (not IsAtomic && FinalReduction) {
if (not do_final_reduction) {
return;
}
auto& [ref_src, tCrRow, tCcRow, gRow_l, cRow, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple;
using ReduceOutput = GmemReduceFn<ElementCompute>;
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
ReduceOutput reduce_output{};
ConvertOutput convert_output{};
// Reduction over batches
if (size<2>(stride(gRow_l)) == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = thread_idx; n < size<1>(gBuf_ml); n += size(tiled_copy)) {
Tensor tRgBuf_ml = gBuf_ml(_0{},n,_,_);
ElementCompute output = tRgBuf_ml(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int ml = 1; ml < size(tRgBuf_ml); ++ml) {
output = reduce_output(output, tRgBuf_ml(ml));
}
if (elem_less(cRow(_0{},n), residue_mn)) {
gRow_l(_0{},n,_0{}) = convert_output(output);
}
}
}
// No reduction over batches
else {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = thread_idx; n < size<1>(gBuf_ml); n += size(tiled_copy)) {
bool do_store = elem_less(cRow(_0{},n), residue_mn);
CUTLASS_PRAGMA_NO_UNROLL
for (int l = 0; l < size<3>(gBuf_ml); ++l) {
Tensor tRgBuf_m = gBuf_ml(_0{},n,_,l);
ElementCompute output = tRgBuf_m(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 1; m < size(tRgBuf_m); ++m) {
output = reduce_output(output, tRgBuf_m(m));
}
if (do_store) {
gRow_l(_0{},n,l) = convert_output(output);
}
}
}
}
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
Layout ref_layout_MN = [&] () {
if constexpr (ReferenceSrc) { return get<0>(args.tiled_copy.get_layoutS_MN()); }
else { return get<0>(args.tiled_copy.get_layoutD_MN()); }
}(); // tile_mn -> tv_idx
// Get the MN layout + coord of lanes to determine shuffle reduction iterations
using _W = Int<decltype(args.tiled_copy)::TiledNumThr::value / NumThreadsPerWarp>;
Layout tv2lane = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_1,_0,_0>>{}; // tv_idx -> lane_idx
Layout ref2lane = composition(tv2lane, ref_layout_MN); // tile_mn -> lane_idx
Layout lane_layout_MN = make_layout(filter(get<0>(ref2lane)), filter(get<1>(ref2lane))); // lane_mn -> lane_idx
Layout inv_lane_layout_MN = right_inverse(lane_layout_MN); // lane_idx -> lane_mn
int lane_idx = canonical_lane_idx();
auto lane_mn = idx2crd(inv_lane_layout_MN(lane_idx), shape(lane_layout_MN));
// Get the MN layout + coord of warps to determine smem reduction iterations
Layout tv2warp = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_0,_1,_0>>{}; // tv_idx -> warp_idx
Layout ref2warp = composition(tv2warp, ref_layout_MN); // tile_mn -> warp_idx
Layout warp_layout_MN = make_layout(filter(get<0>(ref2warp)), filter(get<1>(ref2warp))); // warp_mn -> warp_idx
Layout inv_warp_layout_MN = right_inverse(warp_layout_MN); // warp_idx -> warp_mn
int warp_idx = args.thread_idx / NumThreadsPerWarp;
auto warp_mn = idx2crd(inv_warp_layout_MN(warp_idx), shape(warp_layout_MN));
// Partition output gmem and register tensors
auto [tile_M, tile_N, tile_K] = args.tile_shape_mnk;
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
Tensor mRow = make_tensor(make_gmem_ptr<ElementOutput>(params.ptr_row), make_shape(M,N,L), params.dRow); // (M,N,L)
Tensor gRow_l = local_tile(mRow, take<0,2>(args.tile_shape_mnk), make_coord(m,n,_)); // (CTA_M,CTA_N,L)
Tensor tCgRow = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gRow_l(_,_,l), args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tCrRow = make_tensor_like<ElementCompute>(tCgRow); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
fill(tCrRow, params.reduction_identity);
// Partition gmem+smem reduction buffer tensors
Layout gBuf_layout = make_layout(take<0,2>(args.tile_shape_mnk), make_stride(_0{}, _1{}));
auto block_shape = ceil_div(make_shape(M,N,L), shape(gBuf_layout)); // (M_CNT, N_CNT, L_CNT)
// Let M_CNT (the number of partial reduction results) become the outer mode
Layout block_layout = make_layout(block_shape, make_stride(get<1>(block_shape), _1{}, get<0>(block_shape) * get<1>(block_shape)));
Layout mBuf_layout = blocked_product(gBuf_layout, block_layout);
Tensor mBuf = make_tensor(make_gmem_ptr(params.reduction_buffer), mBuf_layout); // (ceil_M,ceil_N,L)
Tensor gBuf_ml = local_tile(mBuf, take<0,2>(args.tile_shape_mnk), make_coord(_,n,_)); // (CTA_M,CTA_N,REST_M,L)
Layout sBuf_layout = blocked_product(gBuf_layout, // (CTA_M,CTA_N,WARPS_M)
make_layout(make_shape(_1{},_1{},size<0>(warp_layout_MN))));
auto args_tuple = make_tuple(
bool_constant<ReferenceSrc>{}, cute::move(tCrRow), args.tCcD, gRow_l, args.cD, gBuf_ml, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
args.tile_coord_mnkl, args.residue_mn, args.epi_tile, args.tiled_copy, args.thread_idx);
return ConsumerStoreCallbacks<decltype(args_tuple)>(cute::move(args_tuple), params);
}
};
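// Illustrative sketch (functor choices and CtaTileShapeMNK are assumptions): a per-column sum
// across all batches, e.g. a bias gradient d_bias(n) = sum over m,l of D(m,n,l), could be
// expressed as
//   Sm90RowReduction<cutlass::plus, cutlass::plus, cutlass::plus, 0, CtaTileShapeMNK,
//                    float, float, FloatRoundStyle::round_to_nearest>;
// The non-atomic deterministic path reduces partial tiles into the gmem workspace, and the last
// CTA covering a given column of tiles performs the final reduction into ptr_row.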
/////////////////////////////////////////////////////////////////////////////////////////////////
// Col vector reduction
template <
template <class> class RegReduceFn,
template <class> class ShuffleReduceFn,
template <class> class GmemReduceFn,
int Stages,
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
FloatRoundStyle RoundStyle,
class StrideMNL = Stride<_1,_0,_0>,
int Alignment = 128 / sizeof_bits_v<ElementOutput>,
bool EnableNullptr = true, // Noop on nullptr params
// If this is false, ptr_col is assumed to point to a compact m-major (round_nearest(M,CTA_M), ceil_div(N,CTA_N), L)
// tensor of ElementCompute. It is the user's responsibility to reduce this to a (M, L) tensor of ElementOutput
bool FinalReduction = true,
// False means skip OOB predication if OOB inputs are known to be the reduction identity
bool VisitCheckOOB = true
>
struct Sm90ColReduction {
private:
static_assert(Stages == 0, "Smem usage not supported yet");
static_assert(Alignment * sizeof_bits_v<ElementOutput> % 128 == 0, "sub-16B alignment not supported yet");
static_assert(
(cute::is_same_v<StrideMNL, Stride<_1,_0, _0>>) || // col vector reduction, e.g. per-row sum over all batches
(cute::is_same_v<StrideMNL, Stride<_1,_0,int>>)); // batched col vector reduction, e.g. per-row sum per batch
static constexpr bool IsAtomic = is_atomic<GmemReduceFn<ElementCompute>>::value;
static_assert(not (IsAtomic && not FinalReduction), "atomic reduction must be final");
public:
struct SharedStorage { };
struct Arguments {
void* ptr_col = nullptr; // ElementOutput* if FinalReduction, else ElementCompute*
ElementCompute reduction_identity = 0;
StrideMNL dCol = {};
};
struct Params {
void* ptr_col = nullptr;
ElementCompute reduction_identity = 0;
StrideMNL dCol = {};
ElementCompute* reduction_buffer = nullptr;
int* tile_counters = nullptr;
};
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) {
ElementCompute* reduction_buffer;
int* tile_counters = nullptr;
if constexpr (IsAtomic) {
reduction_buffer = nullptr;
}
else if constexpr (not FinalReduction) {
reduction_buffer = reinterpret_cast<ElementCompute*>(args.ptr_col);
}
else {
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, sizeof(int));
reduction_buffer = reinterpret_cast<ElementCompute*>(workspace);
tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
}
return {
args.ptr_col,
args.reduction_identity,
args.dCol,
reduction_buffer,
tile_counters
};
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
if constexpr (IsAtomic || not FinalReduction) {
return 0;
}
size_t workspace_size = 0;
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
// Increment by size of reduction buffer
workspace_size += product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute);
// Align and increment by size of tile counters
workspace_size = round_nearest(workspace_size, sizeof(int));
workspace_size += cute::ceil_div(M, tile_M) * sizeof(int);
return workspace_size;
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
if constexpr (IsAtomic) {
auto [M, N, K, L] = problem_shape;
Layout mCol_layout = make_layout(make_shape(M,N,L), args.dCol);
if (args.ptr_col != nullptr) {
return fill_workspace(args.ptr_col, ElementOutput(args.reduction_identity), cosize(mCol_layout), stream, cuda_adapter);
}
return Status::kSuccess;
}
auto [M, N, K, L] = problem_shape;
auto [tile_M, tile_N, tile_K] = CtaTileShapeMNK{};
size_t tile_counters_offset = product(ceil_div(make_shape(M,N,L), make_shape(tile_M, tile_N))) * tile_M * sizeof(ElementCompute);
tile_counters_offset = round_nearest(tile_counters_offset, sizeof(int));
int* tile_counters = reinterpret_cast<int*>(reinterpret_cast<uint8_t*>(workspace) + tile_counters_offset);
size_t tile_counters_size = cute::ceil_div(M, tile_M) * sizeof(int);
return zero_workspace(tile_counters, tile_counters_size, stream);
}
CUTLASS_DEVICE bool
is_producer_load_needed() const {
return false;
}
CUTLASS_DEVICE bool
is_C_load_needed() const {
return false;
}
CUTLASS_HOST_DEVICE
Sm90ColReduction() { }
CUTLASS_HOST_DEVICE
Sm90ColReduction(Params const& params, SharedStorage const& shared_storage)
: params(params) { }
Params params;
template <class... Args>
CUTLASS_DEVICE auto
get_producer_load_callbacks(ProducerLoadArgs<Args...> const& args) {
return EmptyProducerLoadCallbacks{};
}
template<class ArgsTuple>
struct ConsumerStoreCallbacks : EmptyConsumerStoreCallbacks {
CUTLASS_DEVICE
ConsumerStoreCallbacks(ArgsTuple&& args_tuple, Params const& params)
: args_tuple(cute::forward<ArgsTuple>(args_tuple)),
params(params) {}
ArgsTuple args_tuple;
Params const& params;
bool do_final_reduction = false;
template <typename ElementAccumulator, typename ElementInput, int FragmentSize>
CUTLASS_DEVICE auto
visit(Array<ElementAccumulator, FragmentSize> const& frg_acc, int epi_v, int epi_m, int epi_n,
Array<ElementInput, FragmentSize> const& frg_input) {
if constexpr (EnableNullptr) {
if (params.ptr_col == nullptr) {
return frg_input;
}
}
auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple;
Tensor tCrCol_mn = tCrCol(_,_,_,epi_m,epi_n);
Tensor tCcCol_mn = tCcCol(_,_,_,epi_m,epi_n);
using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>;
using ReduceInput = RegReduceFn<ElementCompute>;
ConvertInput convert_input{};
ReduceInput reduce_input{};
Array frg_I = convert_input(frg_input);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < FragmentSize; ++i) {
if constexpr (VisitCheckOOB) {
if (elem_less(tCcCol_mn(epi_v * FragmentSize + i), residue_mn)) {
ElementCompute& tCrCol_vmn = tCrCol_mn(epi_v * FragmentSize + i);
tCrCol_vmn = reduce_input(tCrCol_vmn, frg_I[i]);
}
}
      else {
        // VisitCheckOOB == false: OOB inputs are the reduction identity, so skip predication
        ElementCompute& tCrCol_vmn = tCrCol_mn(epi_v * FragmentSize + i);
        tCrCol_vmn = reduce_input(tCrCol_vmn, frg_I[i]);
      }
}
return frg_input;
}
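  // Each visit() call folds one fragment of epilogue values into the per-thread register
  // tile tCrCol through RegReduceFn; the cross-lane, cross-warp, and cross-CTA combines
  // happen later in reduce() and end().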
template <class STensor, class SyncFn>
CUTLASS_DEVICE void
reduce(STensor&& smem_buffer, SyncFn const& sync_fn, int epi_m, int epi_n, bool is_last_iteration) {
if (not is_last_iteration) {
return;
}
auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple;
auto [m, n, k, l] = tile_coord_mnkl;
constexpr bool ReferenceSrc = decltype(ref_src)::value;
// Runtime nullptr is noop
if constexpr (EnableNullptr) {
if (params.ptr_col == nullptr) {
return;
}
}
// fully OOB CTA in partially OOB cluster
if (not elem_less(cCol(_0{},_0{}), residue_mn)) {
return;
}
//
// 1. Warp shuffle reduction
//
using FragmentShuffle = Array<ElementCompute, sizeof(uint64_t) / sizeof(ElementCompute)>;
using ReduceShuffle = ShuffleReduceFn<FragmentShuffle>;
ReduceShuffle reduce_shuffle{};
Tensor tCrCol_frg = recast<FragmentShuffle>(filter(tCrCol));
CUTLASS_PRAGMA_UNROLL
for (int reduction_cols = size<1>(lane_layout_MN) / 2; reduction_cols > 0; reduction_cols /= 2) {
CUTLASS_PRAGMA_UNROLL
for (int frg_idx = 0; frg_idx < size(tCrCol_frg); ++frg_idx) {
uint64_t frg_shfl = reinterpret_cast<uint64_t&>(tCrCol_frg(frg_idx));
frg_shfl = __shfl_down_sync(0xFFFFFFFF, frg_shfl, lane_layout_MN(_0{},reduction_cols));
tCrCol_frg(frg_idx) = reduce_shuffle(tCrCol_frg(frg_idx), reinterpret_cast<FragmentShuffle&>(frg_shfl));
}
}
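  // Shuffle-tree sketch (assuming 4 lanes along N): the loop above runs with
  // reduction_cols = 2 and then 1, so each lane first combines its fragments with the
  // lane two columns away and then with its neighbor, leaving the complete per-row
  // reduction in the lanes where get<1>(lane_mn) == 0.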
bool is_reduced_lane = get<1>(lane_mn) == 0;
//
// 2. Atomic reduction
//
if constexpr (IsAtomic) {
// Filter so we don't issue redundant copies over stride-0 modes
Tensor tCrCol_flt = filter_zeros(tCrCol);
Tensor tCcCol_flt = make_tensor(tCcCol.data(), make_layout(tCrCol_flt.shape(), tCcCol.stride()));
Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>(gCol_l(_,_,l), epi_tile, tiled_copy, thread_idx);
Tensor tCgCol_flt = filter_zeros(tCgCol);
// NOTE: atomic reduction is performed in the output type
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
using ReduceOutput = GmemReduceFn<ElementOutput>;
ConvertOutput convert_output{};
ReduceOutput reduce_output{};
if (is_reduced_lane) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(tCrCol_flt); ++i) {
if (elem_less(tCcCol_flt(i), residue_mn)) {
reduce_output(&tCgCol_flt(i), convert_output(tCrCol_flt(i)));
}
}
}
sync_fn();
}
//
// 2. One warp in N, skip threadblock smem reduction
//
else if constexpr (decltype(size<1>(warp_layout_MN))::value <= 1) {
// Dump warp reduction to gmem workspace
using ElementGmem = cute::conditional_t<FinalReduction, ElementCompute volatile, ElementCompute>;
Tensor tCgBuf = sm90_partition_for_epilogue<ReferenceSrc>(gBuf_nl(_,_,n,l), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrCol), recast<ElementGmem>(filter(tCgBuf)));
}
sync_fn();
}
//
// 2. Multiple warps in N, do threadblock smem reduction
//
else {
Tensor sBuf = make_tensor(make_smem_ptr<ElementCompute>(raw_pointer_cast(smem_buffer.data())), sBuf_layout);
static_assert(decltype(cosize(sBuf.layout()))::value * sizeof(ElementCompute) <=
decltype(cosize(smem_buffer.layout()))::value * sizeof(typename remove_cvref_t<STensor>::value_type),
"smem reduction buffer not large enough, use a larger epilogue tile");
// Dump warp reduction to smem workspace
Tensor tCsBuf = sm90_partition_for_epilogue<ReferenceSrc>(sBuf(_,_,get<1>(warp_mn)), epi_tile, tiled_copy, thread_idx);
if (is_reduced_lane) {
// Filter so we don't issue redundant copies over stride-0 modes
// (only works if 0-strides are in same location, which is by construction)
copy_aligned(filter(tCrCol), filter(tCsBuf));
}
sync_fn();
constexpr int SmemFragSize = cute::max(size_t{1}, sizeof(uint32_t) / sizeof(ElementCompute));
using FragmentSmem = Array<ElementCompute, SmemFragSize>;
using VectorSmem = uint_bit_t<sizeof_bits_v<FragmentSmem>>;
using ReduceSmem = GmemReduceFn<FragmentSmem>;
ReduceSmem reduce_smem{};
Tensor sBuf_frg = recast<FragmentSmem>(filter_zeros(sBuf));
Tensor sBuf_vec = recast<VectorSmem>(filter_zeros(sBuf));
constexpr int FragsPerCol = decltype(size<0>(sBuf_frg))::value;
// Do the threadblock smem reduction
CUTLASS_PRAGMA_UNROLL
for (int reduction_cols = size<1>(warp_layout_MN) / 2; reduction_cols > 1; reduction_cols /= 2) {
int FragsPerReduction = reduction_cols * FragsPerCol;
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerReduction; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerReduction));
sBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
// Do final smem reduction and dump to gmem workspace
using VectorGmem = cute::conditional_t<FinalReduction, VectorSmem volatile, VectorSmem>;
Tensor gBuf_vec = recast<VectorGmem>(filter(gBuf_nl(_,_,n,l)));
CUTLASS_PRAGMA_NO_UNROLL
for (int frg_idx = thread_idx; frg_idx < FragsPerCol; frg_idx += size(tiled_copy)) {
FragmentSmem frg_smem = reduce_smem(sBuf_frg(frg_idx), sBuf_frg(frg_idx + FragsPerCol));
gBuf_vec(frg_idx) = reinterpret_cast<VectorSmem&>(frg_smem);
}
sync_fn();
}
//
// 3. Increment atomic counters to signal final gmem reduction
//
if constexpr (not IsAtomic && FinalReduction) {
// Ensure gmem writes are visible to other threads before incrementing counter
__threadfence();
sync_fn();
// Collective thread 0 increments atomic tile counter and copies value to smem
int* prev_tile_count = reinterpret_cast<int*>(raw_pointer_cast(smem_buffer.data()));
if (thread_idx == 0) {
*prev_tile_count = atomicAdd(¶ms.tile_counters[m], 1);
}
sync_fn();
// Broadcast tile count to other threads in CTA and determine final reduction status
do_final_reduction = *prev_tile_count == size<2>(gBuf_nl) * size<3>(gBuf_nl) - 1;
sync_fn();
}
}
CUTLASS_DEVICE void
end() {
//
// 4. Do final gmem reduction if necessary
//
if constexpr (not IsAtomic && FinalReduction) {
if (not do_final_reduction) {
return;
}
auto& [ref_src, tCrCol, tCcCol, gCol_l, cCol, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
tile_coord_mnkl, residue_mn, epi_tile, tiled_copy, thread_idx] = args_tuple;
using ReduceOutput = GmemReduceFn<ElementCompute>;
using ConvertOutput = NumericConverter<ElementOutput, ElementCompute, RoundStyle>;
ReduceOutput reduce_output{};
ConvertOutput convert_output{};
// Reduction over batches
if (size<2>(stride(gCol_l)) == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = thread_idx; m < size<0>(gBuf_nl); m += size(tiled_copy)) {
Tensor tRgBuf_nl = gBuf_nl(m,_0{},_,_);
ElementCompute output = tRgBuf_nl(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int nl = 1; nl < size(tRgBuf_nl); ++nl) {
output = reduce_output(output, tRgBuf_nl(nl));
}
if (elem_less(cCol(m,_0{}), residue_mn)) {
gCol_l(m,_0{},_0{}) = convert_output(output);
}
}
}
// No reduction over batches
else {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = thread_idx; m < size<0>(gBuf_nl); m += size(tiled_copy)) {
bool do_store = elem_less(cCol(m,_0{}), residue_mn);
CUTLASS_PRAGMA_NO_UNROLL
for (int l = 0; l < size<3>(gBuf_nl); ++l) {
Tensor tRgBuf_n = gBuf_nl(m,_0{},_,l);
ElementCompute output = tRgBuf_n(_0{});
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 1; n < size(tRgBuf_n); ++n) {
output = reduce_output(output, tRgBuf_n(n));
}
if (do_store) {
gCol_l(m,_0{},l) = convert_output(output);
}
}
}
}
}
}
};
template <
bool ReferenceSrc, // do register tensors reference the src or dst layout of the tiled copy
class... Args
>
CUTLASS_DEVICE auto
get_consumer_store_callbacks(ConsumerStoreArgs<Args...> const& args) {
Layout ref_layout_MN = [&] () {
if constexpr (ReferenceSrc) { return get<0>(args.tiled_copy.get_layoutS_MN()); }
else { return get<0>(args.tiled_copy.get_layoutD_MN()); }
}(); // tile_mn -> tv_idx
// Get the MN layout + coord of lanes to determine shuffle reduction iterations
using _W = Int<decltype(args.tiled_copy)::TiledNumThr::value / NumThreadsPerWarp>;
Layout tv2lane = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_1,_0,_0>>{}; // tv_idx -> lane_idx
Layout ref2lane = composition(tv2lane, ref_layout_MN); // tile_mn -> lane_idx
Layout lane_layout_MN = make_layout(filter(get<0>(ref2lane)), filter(get<1>(ref2lane))); // lane_mn -> lane_idx
Layout inv_lane_layout_MN = right_inverse(lane_layout_MN); // lane_idx -> lane_mn
int lane_idx = canonical_lane_idx();
auto lane_mn = idx2crd(inv_lane_layout_MN(lane_idx), shape(lane_layout_MN));
// Get the MN layout + coord of warps to determine smem reduction iterations
Layout tv2warp = Layout<Shape<Int<NumThreadsPerWarp>,_W,_1>,Stride<_0,_1,_0>>{}; // tv_idx -> warp_idx
Layout ref2warp = composition(tv2warp, ref_layout_MN); // tile_mn -> warp_idx
Layout warp_layout_MN = make_layout(filter(get<0>(ref2warp)), filter(get<1>(ref2warp))); // warp_mn -> warp_idx
Layout inv_warp_layout_MN = right_inverse(warp_layout_MN); // warp_idx -> warp_mn
int warp_idx = args.thread_idx / NumThreadsPerWarp;
auto warp_mn = idx2crd(inv_warp_layout_MN(warp_idx), shape(warp_layout_MN));
// Partition output gmem and register tensors
auto [tile_M, tile_N, tile_K] = args.tile_shape_mnk;
auto [M, N, K, L] = args.problem_shape_mnkl;
auto [m, n, k, l] = args.tile_coord_mnkl;
Tensor mCol = make_tensor(make_gmem_ptr<ElementOutput>(params.ptr_col), make_shape(M,N,L), params.dCol); // (M,N,L)
Tensor gCol_l = local_tile(mCol, take<0,2>(args.tile_shape_mnk), make_coord(m,n,_)); // (CTA_M,CTA_N,L)
Tensor tCgCol = sm90_partition_for_epilogue<ReferenceSrc>( // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
gCol_l(_,_,l), args.epi_tile, args.tiled_copy, args.thread_idx);
Tensor tCrCol = make_tensor_like<ElementCompute>(tCgCol); // (CPY,CPY_M,CPY_N,EPI_M,EPI_N)
fill(tCrCol, params.reduction_identity);
// Partition gmem+smem reduction buffer tensors
Layout gBuf_layout = make_layout(take<0,2>(args.tile_shape_mnk), make_stride(_1{}, _0{}));
Layout mBuf_layout = blocked_product(gBuf_layout, make_layout(ceil_div(make_shape(M,N,L), shape(gBuf_layout))));
Tensor mBuf = make_tensor(make_gmem_ptr(params.reduction_buffer), mBuf_layout); // (ceil_M,ceil_N,L)
Tensor gBuf_nl = local_tile(mBuf, take<0,2>(args.tile_shape_mnk), make_coord(m,_,_)); // (CTA_M,CTA_N,REST_N,L)
Layout sBuf_layout = blocked_product(gBuf_layout,make_layout(make_shape(_1{},_1{},size<1>(warp_layout_MN)))); // (CTA_M,CTA_N,WARPS_N)
auto args_tuple = make_tuple(
bool_constant<ReferenceSrc>{}, cute::move(tCrCol), args.tCcD, gCol_l, args.cD, gBuf_nl, sBuf_layout,
lane_layout_MN, lane_mn, warp_layout_MN, warp_mn,
args.tile_coord_mnkl, args.residue_mn, args.epi_tile, args.tiled_copy, args.thread_idx);
return ConsumerStoreCallbacks<decltype(args_tuple)>(std::move(args_tuple), params);
}
};
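// In effect, this node computes col(m, l) = reduce_n(out(m, n, l)), also folding the
// batch mode when dCol has a zero batch stride; partial results are combined across
// CTAs either atomically in gmem or through the workspace and tile counters above.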
/////////////////////////////////////////////////////////////////////////////////////////////////
// Batch matrix reduction
template <
int Stages,
class EpilogueTile,
class Element,
class StrideMNL,
class CopyOpR2S,
class SmemLayoutAtom,
int Alignment = 128 / sizeof_bits_v<Element>,
bool EnableNullptr = true // Noop on nullptr params
>
struct Sm90MatrixReduction;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 25760
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct LinearCombinationParams {
uint64_t alpha_data[2];
uint64_t beta_data[2];
CUTLASS_HOST_DEVICE
LinearCombinationParams()
: alpha_data {0lu, 0lu}, beta_data {0lu, 0lu}
{ }
template <typename ElementCompute>
CUTLASS_HOST_DEVICE
LinearCombinationParams(ElementCompute alpha, ElementCompute beta)
: alpha_data {0lu, 0lu}, beta_data {0lu, 0lu}
{
#if defined(__CUDA_ARCH__)
reinterpret_cast<ElementCompute&>(alpha_data) = alpha;
reinterpret_cast<ElementCompute&>(beta_data) = beta;
#else
memcpy( alpha_data, &alpha, sizeof(ElementCompute) );
memcpy( beta_data, &beta, sizeof(ElementCompute) );
#endif
}
};
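// Usage sketch (hypothetical values, little-endian host assumed): pack epilogue scalars
// into the type-erased parameter blob.
//
//   float alpha = 1.25f;
//   float beta = 0.0f;
//   cutlass::epilogue::thread::LinearCombinationParams params(alpha, beta);
//   // params.alpha_data[0] now carries the 32-bit pattern of alpha in its low bits;
//   // the remaining bytes stay zero-initialized.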
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_params.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_params.h",
"repo_id": "include",
"token_count": 799
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_relu0.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_hardswish.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename ElementOutput,
typename ElementAccumulator,
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
ElementAccumulator,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
ElementAccumulator
>;
static int const kFragmentsPerIteration = 1;
};
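/// For example, ElementOutput = half_t with ElementAccumulator = float and
/// ElementsPerAccess = 8 matches the mixed-iterator specialization further below, while
/// unlisted combinations fall back to this primary template.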
/// Partial specialization for float <= float x 4
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<float, float, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for int32_t <= int32_t
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<int32_t, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float <= int32_t
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<float, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for half <= float x 8 epilogues avoids shared memory bank conflicts.
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
half_t,
float,
8,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
16,
8,
8
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
16,
8,
8
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for half <= int32_t x 8 epilogues avoids shared memory bank conflicts.
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
half_t,
int32_t,
8,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
int32_t,
32,
16,
8,
8
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
int32_t,
32,
16,
8,
8
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for int8/int4b_t <= int32 x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
typename ElementOutput,
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
ElementOutput,
int32_t,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
static_assert(platform::is_same<ElementOutput, cutlass::int4b_t>::value ||
platform::is_same<ElementOutput, cutlass::uint4b_t>::value ||
platform::is_same<ElementOutput, int8_t>::value ||
platform::is_same<ElementOutput, uint8_t>::value,
"ElementOutput needs to be 4 or 8 bit (unsigned) int.");
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
int32_t,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
int32_t,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
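/// Selection example: ThreadblockShape::kN == 128 with ElementsPerAccess == 16 uses the
/// mixed iterators above, while kN == 256, kN == 128 with 8-wide access, or 4-wide
/// access fall back to the non-mixed iterators.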
/// Partial specialization for float_e4m3_t <= float x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
cutlass::float_e4m3_t,
float,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using ElementOutput = cutlass::float_e4m3_t;
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float_e5m2_t <= float x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
cutlass::float_e5m2_t,
float,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using ElementOutput = cutlass::float_e5m2_t;
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8 || ElementsPerAccess == 4),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256) || (ThreadblockShape::kN == 128 && ElementsPerAccess == 8) || (ElementsPerAccess == 4),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute,
conv::StrideSupport StrideSupport = conv::StrideSupport::kUnity,
int Rank = 4
>
struct DefaultEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
static conv::StrideSupport const kStrideSupport = StrideSupport;
static int const kRank = Rank;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value;
using PackedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout,
UseCUDAStore
>;
using StridedOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorConv<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout,
UseCUDAStore,
kRank
>;
using OutputTileIterator = typename platform::conditional<StrideSupport == cutlass::conv::StrideSupport::kUnity,
PackedOutputTileIterator,
StridedOutputTileIterator>::type;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
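// Instantiation sketch (WarpMmaTensorOp is assumed to be defined elsewhere; the shapes
// and element types are illustrative, not prescriptive):
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t, 8, float, float>;
//   using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
//       WarpMmaTensorOp,                         // warp-level Tensor Op MMA
//       1,                                       // kPartitionsK
//       OutputOp,                                // output functor
//       8                                        // elements per access
//   >::Epilogue;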
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueTensorOpStridedDgrad {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
int Rank,
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueTensorOpAffineRankN {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
// Map to the row major iterator since the iterator selection for affineN is the same.
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
layout::RowMajor>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
layout::RowMajor> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps which use an
/// interleaved output layout. For this case, shared memory is not needed.
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK,
typename OutputOp_, int ElementsPerAccess, int InterleavedK,
bool isSplitK = false>
struct DefaultInterleavedEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
DefaultInterleavedThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput,
kElementsPerAccess, InterleavedK>::Type;
using OutputTileIterator =
cutlass::epilogue::threadblock::InterleavedPredicatedTileIterator<
OutputTileThreadMap, ElementOutput, InterleavedK>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue<
Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator,
AccumulatorFragmentIterator, OutputOp, InterleavedK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps which use an
/// interleaved output layout. For this case, shared memory is not needed.
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK,
typename OutputOp_, int ElementsPerAccess, int InterleavedK,
bool isSplitK = false>
struct DefaultInterleavedConvEpilogue {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
DefaultInterleavedConvThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput,
kElementsPerAccess, InterleavedK>::Type;
using OutputTileIterator =
cutlass::epilogue::threadblock::InterleavedConvPredicatedTileIterator<
OutputTileThreadMap, ElementOutput, InterleavedK>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
// can reuse the gemm version here to do element selection
layout::ColumnMajorInterleaved<InterleavedK>>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue<
Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator,
AccumulatorFragmentIterator, OutputOp, InterleavedK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h",
"repo_id": "include",
"token_count": 10917
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs and convolution using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_ ///< Output operator
>
class EpilogueDirectStore {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
using WarpShape = typename WarpMmaOperator_::Shape;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using OutputOp = OutputOp_;
using Padding = MatrixShape<0, 0>;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Use this to control the granularity of one epilogue 'iteration'
static int const kFragmentsPerIteration = 1;
static int constexpr kSmemTiles = 1;
static int constexpr kSmemPointerOffset = 0;
/// Shared storage allocation needed by the epilogue
struct SharedStorage { } ;
private:
// Assume the accumulator tile consists of multiple interleaved 32x32 tiles.
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename platform::conditional<
platform::is_same<ElementAccumulator, float>::value,
MatrixShape<2, 2>,
MatrixShape<1, 4> >::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = MatrixShape<4, 4>;
static_assert(OutputOp::kCount >= 2,
"The direct store epilogue for Tensor Ops requires the output functor have kCount >= 2.");
private:
LongIndex warp_offset;
int thread_idx;
int warp_idx;
int lane_idx;
int warp_m, warp_n; // warp coordinates within a cta
int tid_m, tid_n; // thread coordinates within a warp
public:
/// Constructor
CUTLASS_DEVICE
EpilogueDirectStore(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx_, ///< ID of a thread within the threadblock
int warp_idx_, ///< ID of warp within threadblock
int lane_idx_ ///< Id of thread within warp
):
thread_idx(thread_idx_),
warp_idx(warp_idx_),
lane_idx(lane_idx_)
{
// warp offsetting calculations
warp_offset = warp_idx * WarpShape::kM * WarpShape::kN;
int warp_id_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
warp_m = warp_id_mn % WarpCount::kM;
warp_n = warp_id_mn / WarpCount::kM;
MatrixCoord warp_offset_coord(warp_m*WarpShape::kM, warp_n*WarpShape::kN);
// thread offsetting calculations
int quad = (lane_idx >> 2);
int lane_in_quad = (lane_idx & 3);
// This matches the Tensor Op accumulator layout: quads map to rows, lanes within a quad map to column pairs
tid_m = quad;
tid_n = 2 * lane_in_quad;
}
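  // Example mapping (illustrative): lane 13 has quad = 3 and lane_in_quad = 1, so it
  // covers accumulator row tid_m = 3 and starting column tid_n = 2 within its warp tile.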
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator) { ///< Tile iterator for source tensor
if (!output_op.is_source_needed()) {
compute_source_not_needed_(output_op, destination_iterator, accumulators);
}
else {
compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
}
}
private:
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator) { ///< Tile iterator for source tensor
const int kAccumBlockN = 2;
const int kThreadsM = 8;
const int kThreadsN = 4;
const int kBlockM = WarpShape::kM / kThreadsM;
/// Array type used to output
using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>;
/// Array type passed to the output operator - unused elements are optimized away
using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>;
/// Array type used to access pairs of accumulator elements
using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>;
/// Array type passed to the output functor
using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>;
AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators);
CUTLASS_PRAGMA_UNROLL
for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) {
int accum_m = kThreadsM * accum_m_idx;
int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m;
int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n;
ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride;
ElementOutput *source_ptr = source_iterator.pointer + mL * source_iterator.stride;
int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN;
CUTLASS_PRAGMA_UNROLL
for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) {
int accum_idx = accum_m_idx + kBlockM * accum_n_idx;
int accum_n = kThreadsM * accum_n_idx;
// mL and nL are logical coordinate in 2D mapping of epilogue's 4D output
int nL = nL_base + accum_n;
bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column());
AccumulatorFragmentType accum_fragment;
reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx];
OutputFragmentType output_fragment;
if(guard) {
reinterpret_cast<OutputAccessType &>(output_fragment) =
*reinterpret_cast<OutputAccessType const *>(source_ptr + nL);
}
// Perform output operator
output_fragment = output_op(accum_fragment, output_fragment);
if(guard) {
// Store
*reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment);
}
}
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators) { ///< Complete warp-level accumulator tile
const int kAccumBlockN = 2;
const int kThreadsM = 8;
const int kThreadsN = 4;
const int kBlockM = WarpShape::kM / kThreadsM;
/// Array type used to output
using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>;
/// Array type passed to the output operator - unused elements are optimized away
using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>;
/// Array type used to access pairs of accumulator elements
using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>;
/// Array type passed to the output functor
using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>;
AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators);
CUTLASS_PRAGMA_UNROLL
for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) {
int accum_m = kThreadsM * accum_m_idx;
int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m;
int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n;
ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride;
int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN;
CUTLASS_PRAGMA_UNROLL
for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) {
int accum_idx = accum_m_idx + kBlockM * accum_n_idx;
int accum_n = kThreadsM * accum_n_idx;
// mL and nL are logical coordinate in 2D mapping of epilogue's 4D output
int nL = nL_base + accum_n;
bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column());
AccumulatorFragmentType accum_fragment;
reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx];
OutputFragmentType output_fragment;
// Perform output operator
output_fragment = output_op(accum_fragment);
if(guard) {
// Store
*reinterpret_cast<OutputAccessType *>(output_ptr + nL) =
reinterpret_cast<OutputAccessType const &>(output_fragment);
}
}
}
}
};
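// Unlike the shared-memory staged Epilogue, EpilogueDirectStore writes each warp's
// accumulators straight to global memory, which is why SharedStorage above is empty and
// no shared-memory tile iterators are exercised.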
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_direct_store.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_direct_store.h",
"repo_id": "include",
"token_count": 4855
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
  \brief Defines classes for using 8-bit floating-point types (FP8 E4M3 and E5M2) in host or
         device code.
*/
#pragma once
// FP8 types are available starting CUDA 11.8+
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
#define CUDA_FP8_ENABLED 1
#endif
#if defined(__CUDA_ARCH__)
# if (__CUDA_ARCH__ >= 900)
# if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
# define CUDA_PTX_FP8_CVT_ENABLED 1
# endif // (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
# elif (__CUDA_ARCH__ == 890)
# if (__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1))
# define CUDA_PTX_FP8_CVT_ENABLED 1
# endif // (__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1))
# endif // (__CUDA_ARCH__ >= 900)
#endif // defined(__CUDA_ARCH__)
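// For example, building with CUDA 12.x for sm_90 defines both CUDA_FP8_ENABLED and
// CUDA_PTX_FP8_CVT_ENABLED (hardware cvt paths below), whereas an sm_80 build defines
// only CUDA_FP8_ENABLED and falls back to the software conversion routines.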
#ifdef __GNUC__
// Ignore checks on reinterpret-casts that are being used for bitcasts.
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
#ifdef CUDA_FP8_ENABLED
#include <cuda_fp8.h>
#endif
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP8 has two possible encodings: E4M3 and E5M2
//
// E4M3 : 7 | 6 5 4 3 | 2 1 0
// E5M2 : 7 | 6 5 4 3 2 | 1 0
//
///////////////////////////////////////////////////////////////////////////////////////////////////
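// Worked example of the bit layouts above (an illustrative sketch only, not part of the
// CUTLASS API; the function name is hypothetical). A normal E4M3 byte splits into
// sign (1 bit), biased exponent (4 bits, bias 7), and mantissa (3 bits); for instance
// 0x40 -> sign 0, exponent 1000b = 8, mantissa 000b, i.e. (1 + 0/8) * 2^(8 - 7) = 2.0f.
CUTLASS_HOST_DEVICE
float decode_e4m3_normal_example(uint8_t v) {
  int sign_bit = (v >> 7) & 0x1;
  int exp_bits = (v >> 3) & 0xf;     // biased exponent field (bias = 7)
  int mantissa = v & 0x7;            // 3 explicit mantissa bits
  float magnitude = 1.0f + float(mantissa) / 8.0f;
  for (int e = exp_bits - 7; e > 0; --e) { magnitude *= 2.0f; }
  for (int e = exp_bits - 7; e < 0; ++e) { magnitude *= 0.5f; }
  return sign_bit ? -magnitude : magnitude;
}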
enum class FloatEncoding {
E4M3,
E5M2
};
template<FloatEncoding T>
struct alignas(1) float8_base {
static constexpr bool IS_E4M3 = (T == FloatEncoding::E4M3);
static constexpr bool IS_E5M2 = (T == FloatEncoding::E5M2);
// Number of Bits representing mantissa and exponents
static constexpr int FP32_NUM_BITS = 32;
static constexpr int FP32_NUM_EXPONENT_BITS = 8;
static constexpr int FP32_NUM_MANTISSA_BITS = 23;
static constexpr uint32_t FP32_NAN = 0x7fffffff;
static constexpr uint32_t FP32_INFINITY_MASK = 0x7f800000;
static constexpr int FP32_MAX_EXPONENT = 127;
static constexpr int FP32_MIN_EXPONENT = -126;
static constexpr int FP32_EXPONENT_BIAS = 127;
static constexpr int FP16_NUM_BITS = 16;
static constexpr int FP16_NUM_EXPONENT_BITS = 5;
static constexpr int FP16_NUM_MANTISSA_BITS = 10;
static constexpr uint16_t FP16_NAN = 0x7fff;
static constexpr uint16_t FP16_INFINITY_MASK = 0x7c00;
static constexpr int FP16_MAX_EXPONENT = 15;
static constexpr int FP16_MIN_EXPONENT = -14;
static constexpr int FP16_EXPONENT_BIAS = 15;
static constexpr int FP8_NUM_BITS = 8;
static constexpr int FP8_NUM_EXPONENT_BITS = IS_E4M3 ? 4 : 5;
static constexpr int FP8_NUM_MANTISSA_BITS = IS_E4M3 ? 3 : 2;
static constexpr uint8_t FP8_NAN = 0x7f; // Also F8_INF
static constexpr uint8_t FP8_INFINITY_MASK = IS_E4M3 ? 0x78 : 0x7c;
static constexpr int FP8_MAX_EXPONENT = IS_E4M3 ? 7 : 15;
static constexpr int FP8_MIN_EXPONENT = IS_E4M3 ? -6 : -14;
static constexpr int FP8_EXPONENT_BIAS = IS_E4M3 ? 7 : 15;
static constexpr uint8_t FP8_EXPONENT_MASK = (1 << FP8_NUM_EXPONENT_BITS) - 1;
static constexpr uint8_t FP8_MANTISSA_MASK = (1 << FP8_NUM_MANTISSA_BITS) - 1;
static constexpr uint8_t FP8_MAX_FLT = (IS_E4M3 ? 0x7e : 0x7b);
// 256 in float
static constexpr uint32_t FP8_SAT_VAL_FP32 = 0x43800000;
//
// Data members
//
/// Data container
uint8_t storage;
/// Ctors.
CUTLASS_HOST_DEVICE
float8_base() : storage(0) { }
/// Is finite implementation
CUTLASS_HOST_DEVICE
static bool isfinite(float flt) {
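    // Finite iff the FP32 exponent field is not all ones (0x7f800000 masks the exponent bits).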
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
return (s & 0x7f800000) < 0x7f800000;
}
/// Is NaN implementation
CUTLASS_HOST_DEVICE
static bool isnan(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
return (s & 0x7fffffff) > 0x7f800000;
}
/// Is infinite implementation
CUTLASS_HOST_DEVICE
static bool isinf(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
// Sign = 0 for +inf, 1 for -inf
// Exponent = all ones
// Mantissa = all zeros
return (s == 0x7f800000) || (s == 0xff800000);
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static uint8_t convert_float_to_fp8(float const& flt) {
    // software implementation rounds to nearest even
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
// Extract the bits in the FP32 type
uint8_t sign = uint8_t((s >> 24 & 0x80));
int32_t exp = int32_t((s >> FP32_NUM_MANTISSA_BITS) & 0xff) - FP32_EXPONENT_BIAS;
int mantissa = s & 0x7fffff;
uint8_t u = 0;
uint8_t const kF8_NaN = 0x7f;
// NaN => NaN
if (isnan(flt)) {
return kF8_NaN;
}
// Inf => MAX_FLT (satfinite)
if (isinf(flt)) {
return sign | FP8_MAX_FLT;
}
// Special handling
if (exp == -128) {
// int8 range is from -128 to 127
// So 255(inf) - 127(bias) = 128 - will show up as -128
// satfinite
return (sign | FP8_MAX_FLT);
}
int sticky_bit = 0;
bool skip_sign = false;
bool may_be_nan = false;
if ( (exp >= FP8_MIN_EXPONENT) && (exp <= FP8_MAX_EXPONENT) ) {
// normal fp32 to normal fp8
exp = exp + FP8_EXPONENT_BIAS;
u = uint8_t((uint32_t(exp) & FP8_EXPONENT_MASK) << FP8_NUM_MANTISSA_BITS);
u = uint8_t(u | (mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS)));
} else if(exp < FP8_MIN_EXPONENT) {
// normal single-precision to subnormal float8-precision representation
int rshift = (FP8_MIN_EXPONENT - exp);
if (rshift < FP32_NUM_BITS) {
mantissa |= (1 << FP32_NUM_MANTISSA_BITS);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS- FP8_NUM_MANTISSA_BITS)) & FP8_MANTISSA_MASK);
} else {
mantissa = 0;
u = 0;
}
// Exponent > FP8_MAX_EXPONENT - this is a special case done to match HW
// 0x4380_0000 to 0x43e0_0000 - maps from 256 to 448, and does not saturate / inf.
} else {
if( exp == (FP8_MAX_EXPONENT + 1) ) {
uint8_t mantissa_tmp = uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS));
if( mantissa_tmp < FP8_MANTISSA_MASK) {
exp = exp + FP8_EXPONENT_BIAS;
u = uint8_t(uint32_t(exp) << FP8_NUM_MANTISSA_BITS) | mantissa_tmp;
may_be_nan = (mantissa_tmp == (FP8_MANTISSA_MASK-1));
} else {
// satfinite
return (sign | FP8_MAX_FLT);
}
} else{
// satfinite
return (sign | FP8_MAX_FLT);
}
}
// round to nearest even
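    // The bit immediately below the kept mantissa is the round bit; all lower bits are OR'd
    // into the sticky bit. Round up when the round bit is set and either the sticky bit or
    // the kept LSB is also set.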
int NUM_BITS_SHIFT = FP32_NUM_MANTISSA_BITS - (FP8_NUM_MANTISSA_BITS + 1);
int round_bit = ((mantissa >> NUM_BITS_SHIFT) & 1);
sticky_bit |= ((mantissa & ((1 << NUM_BITS_SHIFT) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint8_t(u + 1);
if( may_be_nan ) {
skip_sign = true;
}
}
if (u > FP8_MAX_FLT) {
// satfinite
u = (sign | FP8_MAX_FLT);
}
if( ! skip_sign ) {
u |= sign;
}
return u;
}
/// Converts a fp8 value stored as a uint8_t to a float
CUTLASS_HOST_DEVICE
static float convert_fp8_to_float(uint8_t const& x) {
uint32_t constexpr kF32_NaN = 0x7fffffff;
uint8_t const &f8 = x;
uint32_t sign = (f8 >> (FP8_NUM_BITS - 1)) & 1;
uint32_t exp = (f8 >> FP8_NUM_MANTISSA_BITS) & FP8_EXPONENT_MASK;
uint32_t mantissa = f8 & FP8_MANTISSA_MASK;
unsigned f = (sign << (FP32_NUM_BITS-1));
if (IS_E4M3 && exp == 15 && mantissa == 0x7) {
f = kF32_NaN;
}
else if (exp > 0 && (IS_E4M3 || exp < (FP8_MAX_EXPONENT + FP8_EXPONENT_BIAS + 1))) {
// normal
exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS);
f = f |
(exp << FP32_NUM_MANTISSA_BITS) |
(mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS));
} else if (exp == 0) {
if (mantissa) {
// subnormal
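        // Normalize: shift the mantissa up until the implicit leading 1 appears, decrementing
        // the exponent each step. For example, E4M3 0x01 (the smallest subnormal) decodes to
        // (1/8) * 2^-6 = 2^-9.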
exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS) + 1;
while ((mantissa & (1 << FP8_NUM_MANTISSA_BITS)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= FP8_MANTISSA_MASK;
f = f |
(exp << FP32_NUM_MANTISSA_BITS) |
(mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS));
} else {
// sign-preserving zero
}
} else {
if(mantissa == 0){
// Sign-preserving infinity
f = (f | 0x7f800000);
} else {
// Canonical NaN
f = kF32_NaN;
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
}
};
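// Illustrative sketch (not part of the CUTLASS API; the function name is hypothetical):
// exercising the software conversion path above. It checks two behaviors of the E4M3 encoder:
//   * satfinite: out-of-range inputs clamp to the largest finite value, 448 (0x7e);
//   * round-to-nearest-even: 17.0f is halfway between 16 and 18 and rounds to 16.
CUTLASS_HOST_DEVICE
bool float8_base_conversion_examples() {
  using E4M3 = float8_base<FloatEncoding::E4M3>;
  bool saturates  = (E4M3::convert_float_to_fp8(1000.0f) == 0x7e);
  bool rounds_rne = (E4M3::convert_fp8_to_float(E4M3::convert_float_to_fp8(17.0f)) == 16.0f);
  return saturates && rounds_rne;
}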
// Forward declaration of float_e5m2_t to define float_e4m3_t <=> float_e5m2_t
// conversions in class float_e4m3_t
struct float_e5m2_t;
///////////////////////////////////////////////////////////////
///
/// floating-point 8 type : E4M3
///
///////////////////////////////////////////////////////////////
struct alignas(1) float_e4m3_t : float8_base<FloatEncoding::E4M3> {
using Base = float8_base<FloatEncoding::E4M3>;
static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT;
//
// Static conversion operators
//
/// Constructs from an uint8_t
CUTLASS_HOST_DEVICE
static float_e4m3_t bitcast(uint8_t x) {
float_e4m3_t f;
f.storage = x;
return f;
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e4m3_t from_float(float const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp;
float y = float();
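    // The cvt.*.e4m3x2.f32 instruction converts a pair of floats and packs both results
    // into a 16-bit register; the second source (flt) lands in the low byte, which is the
    // byte extracted by the reinterpret_cast below.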
asm volatile("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt));
return *reinterpret_cast<float_e4m3_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(flt));
#endif
}
  /// FP16 -> E4M3 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e4m3_t from_half(half const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp = 0;
uint32_t bits = reinterpret_cast<uint16_t const &>(flt);
asm volatile("cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits));
return *reinterpret_cast<float_e4m3_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(__half2float(flt)));
#endif
}
// E4M3 -> half
CUTLASS_HOST_DEVICE
static half to_half(float_e4m3_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return reinterpret_cast<half2 const &>(packed).x;
#else
return __float2half(Base::convert_fp8_to_float(x.storage));
#endif
}
// E4M3 -> Float
CUTLASS_HOST_DEVICE
static float to_float(float_e4m3_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return __half2float(reinterpret_cast<half2 const &>(packed).x);
#else
return Base::convert_fp8_to_float(x.storage);
#endif
}
//
// Methods
//
/// Constructor inheritance
using Base::Base;
/// Default constructor
float_e4m3_t() = default;
#ifdef CUDA_FP8_ENABLED
/// Conversion from CUDA's FP8 type
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(__nv_fp8_e4m3 x) {
storage = x.__x;
}
#endif
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(float x) {
storage = from_float(x).storage;
}
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(half x) {
storage = from_half(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(double x): float_e4m3_t(float(x)) {
}
/// Integer conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(int x): float_e4m3_t(float(x)) {
}
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(unsigned x): float_e4m3_t(float(x)) {
}
/// E5M2 conversion. Defined after float_e5m2_t is defined.
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(float_e5m2_t x);
#ifdef CUDA_FP8_ENABLED
/// Assignment from CUDA's FP8 type
CUTLASS_HOST_DEVICE
float_e4m3_t & operator=(__nv_fp8_e4m3 x) {
storage = x.__x;
return *this;
}
#endif
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return to_float(*this);
}
/// Converts to half
CUTLASS_HOST_DEVICE
operator half() const {
return to_half(*this);
}
  /// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(to_float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
#if defined(__CUDA_ARCH__)
return __half2int_rn(to_half(*this));
#else
return int(to_float(*this));
#endif
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
#if defined(__CUDA_ARCH__)
return bool(__half2int_rn(to_half(*this)));
#else
return bool(int(to_float(*this)));
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
    return exponent_biased() - Base::FP8_EXPONENT_BIAS;  // E4M3 exponent bias is 7
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & Base::FP8_MANTISSA_MASK);
}
};
///////////////////////////////////////////////////////////////
///
/// floating-point 8 type : E5M2
///
///////////////////////////////////////////////////////////////
struct alignas(1) float_e5m2_t : float8_base<FloatEncoding::E5M2> {
using Base = float8_base<FloatEncoding::E5M2>;
static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT;
//
// Static conversion operators
//
/// Constructs from an uint8_t
CUTLASS_HOST_DEVICE
static float_e5m2_t bitcast(uint8_t x) {
float_e5m2_t f;
f.storage = x;
return f;
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e5m2_t from_float(float const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp;
float y = float();
asm volatile("cvt.rn.satfinite.e5m2x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt));
return *reinterpret_cast<float_e5m2_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(flt));
#endif
}
/// FP16 -> E5M2 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e5m2_t from_half(half const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp = 0;
uint32_t bits = reinterpret_cast<uint16_t const &>(flt);
asm volatile("cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits));
return *reinterpret_cast<float_e5m2_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(__half2float(flt)));
#endif
}
// E5M2 -> half
CUTLASS_HOST_DEVICE
static half to_half(float_e5m2_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return reinterpret_cast<half2 const &>(packed).x;
#else
return __float2half(Base::convert_fp8_to_float(x.storage));
#endif
}
// E5M2 -> Float
CUTLASS_HOST_DEVICE
static float to_float(float_e5m2_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return __half2float(reinterpret_cast<half2 const &>(packed).x);
#else
return Base::convert_fp8_to_float(x.storage);
#endif
}
//
// Methods
//
/// Constructor inheritance
using Base::Base;
/// Default constructor
float_e5m2_t() = default;
#ifdef CUDA_FP8_ENABLED
/// Conversion from CUDA's FP8 type
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(__nv_fp8_e5m2 x) {
storage = x.__x;
}
#endif
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(float x) {
storage = from_float(x).storage;
}
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(half x) {
storage = from_half(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(double x): float_e5m2_t(float(x)) {
}
/// Integer conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(int x): float_e5m2_t(float(x)) {
}
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(unsigned x): float_e5m2_t(float(x)) {
}
/// E4M3 conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(float_e4m3_t x);
#ifdef CUDA_FP8_ENABLED
/// Assignment from CUDA's FP8 type
CUTLASS_HOST_DEVICE
float_e5m2_t & operator=(__nv_fp8_e5m2 x) {
storage = x.__x;
return *this;
}
#endif
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return to_float(*this);
}
/// Converts to half
CUTLASS_HOST_DEVICE
operator half() const {
return to_half(*this);
}
  /// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(to_float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
#if defined(__CUDA_ARCH__)
return __half2int_rn(to_half(*this));
#else
return int(to_float(*this));
#endif
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
#if defined(__CUDA_ARCH__)
return bool(__half2int_rn(to_half(*this)));
#else
return bool(int(to_float(*this)));
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 15;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & Base::FP8_MANTISSA_MASK);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator+(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator-(float_e4m3_t const& lhs) {
return float_e4m3_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator-(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator*(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator/(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator+=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator-=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator*=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator/=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator++(float_e4m3_t & lhs) {
float tmp(lhs);
++tmp;
lhs = float_e4m3_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator--(float_e4m3_t & lhs) {
float tmp(lhs);
--tmp;
lhs = float_e4m3_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator++(float_e4m3_t & lhs, int) {
float_e4m3_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = float_e4m3_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator--(float_e4m3_t & lhs, int) {
float_e4m3_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = float_e4m3_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
bool operator==(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator+(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator-(float_e5m2_t const& lhs) {
return float_e5m2_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator-(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator*(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator/(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator+=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator-=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator*=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator/=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator++(float_e5m2_t & lhs) {
float tmp(lhs);
++tmp;
lhs = float_e5m2_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator--(float_e5m2_t & lhs) {
float tmp(lhs);
--tmp;
lhs = float_e5m2_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator++(float_e5m2_t & lhs, int) {
float_e5m2_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = float_e5m2_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator--(float_e5m2_t & lhs, int) {
float_e5m2_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = float_e5m2_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// float_e4m3_t <=> float_e5m2_t conversions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// float_e4m3_t <= float_e5m2_t
CUTLASS_HOST_DEVICE
float_e4m3_t::float_e4m3_t(float_e5m2_t x) {
storage = from_float(float_e5m2_t::to_float(x)).storage;
}
/// float_e5m2_t <= float_e4m3_t
CUTLASS_HOST_DEVICE
float_e5m2_t::float_e5m2_t(float_e4m3_t x) {
storage = from_float(float_e4m3_t::to_float(x)).storage;
}
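// Note: both directions round-trip through float with round-to-nearest-even rounding;
// E5M2 magnitudes above 448 saturate to the E4M3 maximum finite value, and
// E4M3 -> E5M2 drops one mantissa bit.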
///////////////////////////////////////////////////////////////
///
/// Umbrella floating-point 8-bit data type : type_erased_dynamic_float8_t
/// This umbrella datatype can be enabled when a user provides a specific
/// datatype in runtime argument list.
///
/// Currently supported runtime datatypes compatible with type_erased_dynamic_float8_t:
/// QMMAFormat::E5M2
/// QMMAFormat::E4M3
///
///////////////////////////////////////////////////////////////
union type_erased_dynamic_float8_t {
uint8_t data;
cutlass::float_e5m2_t e5m2;
cutlass::float_e4m3_t e4m3;
CUTLASS_HOST_DEVICE
explicit operator cutlass::float_e5m2_t() const {
return e5m2;
}
CUTLASS_HOST_DEVICE
explicit operator cutlass::float_e4m3_t() const {
return e4m3;
}
};
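// Illustrative sketch (not part of the CUTLASS API; the function name and boolean flag are
// hypothetical - real code would key off a runtime datatype enum): interpreting type-erased
// FP8 storage according to the format selected at runtime.
CUTLASS_HOST_DEVICE
float decode_type_erased_fp8_example(type_erased_dynamic_float8_t x, bool is_e4m3) {
  return is_e4m3 ? float(static_cast<cutlass::float_e4m3_t>(x))
                 : float(static_cast<cutlass::float_e5m2_t>(x));
}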
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits common to all float8 types
template <typename T>
struct float8_base_numeric_limits {
private:
using F8Type = T;
public:
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = F8Type::FP8_NUM_MANTISSA_BITS;
/// Least positive value
CUTLASS_HOST_DEVICE
static F8Type min() { return F8Type::bitcast(0x01); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static F8Type round_error() { return F8Type(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static F8Type denorm_min() { return F8Type::bitcast(0x01); }
};
/// Numeric limits for float_e4m3_t
template <>
struct numeric_limits<cutlass::float_e4m3_t> :
public float8_base_numeric_limits<cutlass::float_e4m3_t> {
static bool const has_infinity = false;
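  // Concrete E4M3 values: lowest() == -448 (0xfe), max() == 448 (0x7e), epsilon() == 2^-3 == 0.125 (0x20).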
/// Minimum finite value
static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); }
};
/// Numeric limits for float_e5m2_t
template <>
struct numeric_limits<cutlass::float_e5m2_t> :
public float8_base_numeric_limits<cutlass::float_e5m2_t> {
static bool const has_infinity = true;
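  // Concrete E5M2 values: lowest() == -57344 (0xfb), max() == 57344 (0x7b), epsilon() == 2^-2 == 0.25 (0x34).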
/// Minimum finite value
static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); }
};
} // namespace std
#endif
namespace platform {
/// Numeric limits common to all float8 types
template <typename T>
struct float8_base_numeric_limits {
private:
using F8Type = T;
public:
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = F8Type::FP8_NUM_MANTISSA_BITS;
/// Least positive value
CUTLASS_HOST_DEVICE
static F8Type min() { return F8Type::bitcast(0x01); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static F8Type round_error() { return F8Type(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static F8Type denorm_min() { return F8Type::bitcast(0x01); }
};
/// std::numeric_limits
template <class T>
struct numeric_limits;
/// Numeric limits for float_e4m3_t
template <>
struct numeric_limits<cutlass::float_e4m3_t> :
public float8_base_numeric_limits<cutlass::float_e4m3_t> {
static bool const has_infinity = false;
/// Minimum finite value
static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); }
};
/// Numeric limits for float_e5m2_t
template <>
struct numeric_limits<cutlass::float_e5m2_t> :
public float8_base_numeric_limits<cutlass::float_e5m2_t> {
static bool const has_infinity = true;
/// Minimum finite value
static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); }
/// Machine epsilon, that is, the difference between 1.0 and the next representable value
static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); }
};
} // namespace platform
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
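// Usage sketch: `auto x = 3.5_fe4m3;` constructs cutlass::float_e4m3_t(3.5f) and
// `auto y = 2_fe5m2;` constructs cutlass::float_e5m2_t(2).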
CUTLASS_HOST_DEVICE
cutlass::float_e4m3_t operator "" _fe4m3(long double x) {
return cutlass::float_e4m3_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e4m3_t operator "" _fe4m3(unsigned long long int x) {
return cutlass::float_e4m3_t(int(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e5m2_t operator "" _fe5m2(long double x) {
return cutlass::float_e5m2_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e5m2_t operator "" _fe5m2(unsigned long long int x) {
return cutlass::float_e5m2_t(int(x));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/float8.h/0 | {
"file_path": "include/cutlass/float8.h",
"repo_id": "include",
"token_count": 16661
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and
batched array variants.
*/
#pragma once
// common
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/detail/layout.hpp"
#include "cutlass/detail/mma.hpp"
#include "cutlass/cuda_host_adapter.hpp"
#if !defined(__CUDACC_RTC__)
#include "cutlass/cluster_launch.hpp"
#include "cutlass/trace.h"
#endif // !defined(__CUDACC_RTC__)
// 2.x
#include "cutlass/gemm/device/gemm_universal_base.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h"
// 3.x
#include "cutlass/gemm/kernel/gemm_universal.hpp"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::device {
////////////////////////////////////////////////////////////////////////////////
/*!
GemmUniversalAdapter is a stateful, reusable GEMM handle built around a kernel
of type cutlass::gemm::kernel::Gemm or cutlass::gemm::kernel::GemmUniversal.
It manages the lifetime of the underlying `kernel::Params` struct, and exposes APIs
to create it from the host facing arguments. For power users, new static methods
are exposed in 3.x APIs that bypass the stateful methods or args->params lowering.
It supports kernel types that implement both the 2.x and 3.0 APIs,
however, this is done by specializing the implementation of GemmUniversalAdapter
on the two kernel API types, and thus, GemmUniversalAdapter's behaviour might
differ between the two specializations.
*/
template <class GemmKernel_, class Enable = void>
class GemmUniversalAdapter;
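//
// Typical host-side usage sketch (illustrative only; `Gemm` stands for a fully
// specialized GemmUniversalAdapter and `args` for its Arguments struct):
//
//   Gemm gemm_op;
//   cutlass::Status status = Gemm::can_implement(args);
//   size_t workspace_bytes = Gemm::get_workspace_size(args);
//   // ... allocate workspace_bytes of device memory and point `workspace` at it ...
//   status = gemm_op.initialize(args, workspace, stream);
//   status = gemm_op.run(stream);
//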
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// CUTLASS 3.x API /////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <class GemmKernel_>
class GemmUniversalAdapter<
GemmKernel_,
cute::enable_if_t<gemm::detail::IsCutlass3GemmKernel<GemmKernel_>::value>>
{
public:
using GemmKernel = GemmKernel_;
using TileShape = typename GemmKernel::TileShape;
using ElementA = typename GemmKernel::ElementA;
using ElementB = typename GemmKernel::ElementB;
using ElementC = typename GemmKernel::ElementC;
using ElementD = typename GemmKernel::ElementD;
using ElementAccumulator = typename GemmKernel::ElementAccumulator;
using DispatchPolicy = typename GemmKernel::DispatchPolicy;
using CollectiveMainloop = typename GemmKernel::CollectiveMainloop;
using CollectiveEpilogue = typename GemmKernel::CollectiveEpilogue;
// Map back to 2.x type as best as possible
using LayoutA = gemm::detail::StrideToLayoutTagA_t<typename GemmKernel::StrideA>;
using LayoutB = gemm::detail::StrideToLayoutTagB_t<typename GemmKernel::StrideB>;
using LayoutC = gemm::detail::StrideToLayoutTagC_t<typename GemmKernel::StrideC>;
using LayoutD = gemm::detail::StrideToLayoutTagC_t<typename GemmKernel::StrideD>;
static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER;
static ComplexTransform const kTransformA = cute::is_same_v<typename GemmKernel::CollectiveMainloop::TransformA, cute::conjugate> ?
ComplexTransform::kConjugate : ComplexTransform::kNone;
static ComplexTransform const kTransformB = cute::is_same_v<typename GemmKernel::CollectiveMainloop::TransformB, cute::conjugate> ?
ComplexTransform::kConjugate : ComplexTransform::kNone;
// Legacy: Assume MultiplyAdd only since we do not use this tag type in 3.0
using MathOperator = cutlass::arch::OpMultiplyAdd;
using OperatorClass = cutlass::detail::get_operator_class_t<typename CollectiveMainloop::TiledMma>;
using ArchTag = typename GemmKernel::ArchTag;
// NOTE: Assume identity swizzle for now
using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Assume TiledMma's ShapeMNK is the same as 2.x's ThreadblockShape
using ThreadblockShape = cutlass::gemm::GemmShape<
cute::size<0>(TileShape{}),
cute::size<1>(TileShape{}),
cute::size<2>(TileShape{})>;
using ClusterShape = cutlass::gemm::GemmShape<
cute::size<0>(typename GemmKernel::DispatchPolicy::ClusterShape{}),
cute::size<1>(typename GemmKernel::DispatchPolicy::ClusterShape{}),
cute::size<2>(typename GemmKernel::DispatchPolicy::ClusterShape{})>;
// Instruction shape is easy too, since we get that directly from our TiledMma's atom shape
using InstructionShape = cutlass::gemm::GemmShape<
cute::size<0>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}),
cute::size<1>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}),
cute::size<2>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{})>;
// Legacy: provide a correct warp count, but no reliable warp shape
static int const kThreadCount = GemmKernel::MaxThreadsPerBlock;
// Warp shape is not a primary API type in 3.x
// But we can best approximate it by inspecting the TiledMma
// For this, we make the assumption that we always have 4 warps along M, and rest along N, none along K
// We also always round up the warp count to 4 if the tiled mma is smaller than 128 threads
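  // For example, a TiledMma spanning 256 threads gives WarpsInMma = 8 and WarpCount = <4, 2, 1>;
  // a 128x128 MMA tile then maps to WarpShape = <32, 64, kK>.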
static constexpr int WarpsInMma = cute::max(4, CUTE_STATIC_V(cute::size(typename GemmKernel::TiledMma{})) / 32);
static constexpr int WarpsInMmaM = 4;
static constexpr int WarpsInMmaN = cute::ceil_div(WarpsInMma, WarpsInMmaM);
using WarpCount = cutlass::gemm::GemmShape<WarpsInMmaM, WarpsInMmaN, 1>;
using WarpShape = cutlass::gemm::GemmShape<
CUTE_STATIC_V(cute::tile_size<0>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaM,
CUTE_STATIC_V(cute::tile_size<1>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaN,
CUTE_STATIC_V(cute::tile_size<2>(typename CollectiveMainloop::TiledMma{}))>;
static int constexpr kStages = CollectiveMainloop::DispatchPolicy::Stages;
// Inspect TiledCopy for A and B to compute the alignment size
static int constexpr kAlignmentA = cutlass::detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveMainloop::GmemTiledCopyA, ElementA, typename CollectiveMainloop::TiledMma::ValTypeA>();
static int constexpr kAlignmentB = cutlass::detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveMainloop::GmemTiledCopyB, ElementB, typename CollectiveMainloop::TiledMma::ValTypeB>();
static int constexpr kAlignmentC = cutlass::detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveEpilogue::GmemTiledCopyC, ElementC>();
static int constexpr kAlignmentD = cutlass::detail::get_alignment_count_from_gmem_tiled_copy<
typename CollectiveEpilogue::GmemTiledCopyD, ElementD>();
using EpilogueOutputOp = typename CollectiveEpilogue::ThreadEpilogueOp;
// Split-K preserves splits that are 128b aligned
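  // For example, with 16-bit A and B operands this evaluates to max(128/16, 128/16) = 8 elements.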
static int constexpr kSplitKAlignment = cute::max(
128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
/// Argument structure: User API
using Arguments = typename GemmKernel::Arguments;
/// Argument structure: Kernel API
using Params = typename GemmKernel::Params;
private:
/// Kernel API parameters object
Params params_;
public:
/// Access the Params structure
Params const& params() const {
return params_;
}
/// Determines whether the GEMM can execute the given problem.
static Status
can_implement(Arguments const& args) {
if (GemmKernel::can_implement(args)) {
return Status::kSuccess;
}
else {
return Status::kInvalid;
}
}
/// Gets the workspace size
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_bytes = 0;
if (args.mode == GemmUniversalMode::kGemmSplitKParallel) {
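      // For a 128x128 CTA tile this term adds 128 * 128 * sizeof(int) = 64 KiB of workspace.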
workspace_bytes += sizeof(int) * size_t(cute::size<0>(TileShape{})) * size_t(cute::size<1>(TileShape{}));
}
CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes);
workspace_bytes += GemmKernel::get_workspace_size(args);
return workspace_bytes;
}
/// Computes the grid shape
static dim3
get_grid_shape(Arguments const& args, void* workspace = nullptr) {
auto tmp_params = GemmKernel::to_underlying_arguments(args, workspace);
return GemmKernel::get_grid_shape(tmp_params);
}
/// Computes the grid shape
static dim3
get_grid_shape(Params const& params) {
return GemmKernel::get_grid_shape(params);
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int /* smem_capacity */ = -1) {
CUTLASS_TRACE_HOST("GemmUniversal::maximum_active_blocks()");
int max_active_blocks = -1;
int smem_size = GemmKernel::SharedStorageSize;
// first, account for dynamic smem capacity if needed
cudaError_t result;
if (smem_size >= (48 << 10)) {
CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size);
result = cudaFuncSetAttribute(
device_kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(
" cudaFuncSetAttribute() returned error: "
<< cudaGetErrorString(result));
return -1;
}
}
// query occupancy after setting smem size
result = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks,
device_kernel<GemmKernel>,
GemmKernel::MaxThreadsPerBlock,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(
" cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error: "
<< cudaGetErrorString(result));
return -1;
}
CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks);
return max_active_blocks;
}
/// Initializes GEMM state from arguments.
Status
initialize(
Arguments const& args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
CUTLASS_TRACE_HOST("GemmUniversal::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Initialize the workspace
Status status = GemmKernel::initialize_workspace(args, workspace, stream, cuda_adapter);
if (status != Status::kSuccess) {
return status;
}
// Initialize the Params structure
params_ = GemmKernel::to_underlying_arguments(args, workspace);
// Don't set the function attributes - require the CudaHostAdapter to set it.
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
return Status::kSuccess;
}
else {
//
// Account for dynamic smem capacity if needed
//
int smem_size = GemmKernel::SharedStorageSize;
CUTLASS_ASSERT(cuda_adapter == nullptr);
if (smem_size >= (48 << 10)) {
CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size);
cudaError_t result = cudaFuncSetAttribute(
device_kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (cudaSuccess != result) {
result = cudaGetLastError(); // to clear the error bit
CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
}
return Status::kSuccess;
}
/// Update API is preserved in 3.0, but does not guarantee a lightweight update of params.
Status
update(Arguments const& args, void* workspace = nullptr) {
CUTLASS_TRACE_HOST("GemmUniversal()::update() - workspace: " << workspace);
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes > 0 && nullptr == workspace) {
return Status::kErrorWorkspaceNull;
}
params_ = GemmKernel::to_underlying_arguments(args, workspace);
return Status::kSuccess;
}
  /// Primary run() entry point API that is static, allowing users to create and manage their own params.
  /// The supplied params struct must be constructed by calling GemmKernel::to_underlying_arguments()
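  /// Example (power users): arguments can be lowered once and the kernel re-launched without
  /// the stateful handle, e.g.
  ///   auto params = GemmKernel::to_underlying_arguments(args, workspace);
  ///   GemmUniversalAdapter<GemmKernel>::run(params, stream);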
static Status
run(Params& params,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
CUTLASS_TRACE_HOST("GemmUniversal::run()");
dim3 const block = GemmKernel::get_block_shape();
dim3 const grid = get_grid_shape(params);
// configure smem size and carveout
int smem_size = GemmKernel::SharedStorageSize;
Status launch_result{ Status::kSuccess };
// Use extended launch API only for mainloops that use it
if constexpr (GemmKernel::ArchTag::kMinComputeCapability >= 90) {
dim3 cluster(cute::size<0>(typename GemmKernel::DispatchPolicy::ClusterShape{}),
cute::size<1>(typename GemmKernel::DispatchPolicy::ClusterShape{}),
cute::size<2>(typename GemmKernel::DispatchPolicy::ClusterShape{}));
void* kernel_params[] = {¶ms};
if constexpr (kEnableCudaHostAdapter) {
//
// Use the cuda host adapter
//
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
launch_result = cuda_adapter->launch(grid,
cluster,
block,
smem_size,
stream,
kernel_params,
0);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
void const* kernel = (void const*) device_kernel<GemmKernel>;
if constexpr (GemmKernel::ArchTag::kMinComputeCapability == 90) {
launch_result = ClusterLauncher::launch(
grid, cluster, block, smem_size, stream, kernel, kernel_params);
}
}
}
else {
launch_result = Status::kSuccess;
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
void* kernel_params[] = {¶ms};
launch_result = cuda_adapter->launch(
grid, block, smem_size, stream, kernel_params, 0
);
}
else {
return Status::kErrorInternal;
}
}
else {
CUTLASS_ASSERT(cuda_adapter == nullptr);
device_kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params);
}
}
cudaError_t result = cudaGetLastError();
if (cudaSuccess == result && Status::kSuccess == launch_result) {
return Status::kSuccess;
}
else {
CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result);
return Status::kErrorInternal;
}
}
//
// Non-static launch overloads that first create and set the internal params struct of this kernel handle.
//
/// Launches the kernel after first constructing Params internal state from supplied arguments.
Status
run(
Arguments const& args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr
) {
Status status = initialize(args, workspace, stream, cuda_adapter);
if (Status::kSuccess == status) {
status = run(params_, stream, cuda_adapter);
}
return status;
}
/// Launches the kernel after first constructing Params internal state from supplied arguments.
Status
operator()(
Arguments const& args,
void* workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return run(args, workspace, stream, cuda_adapter);
}
/// Overload that allows a user to re-launch the same kernel without updating internal params struct.
Status
run(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
return run(params_, stream, cuda_adapter);
}
/// Overload that allows a user to re-launch the same kernel without updating internal params struct.
Status
operator()(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
return run(params_, stream, cuda_adapter);
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// CUTLASS 2.x API /////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <class GemmKernel_>
class GemmUniversalAdapter<
GemmKernel_,
cute::enable_if_t<not gemm::detail::IsCutlass3GemmKernel<GemmKernel_>::value>>
{
public:
using GemmKernel = GemmKernel_;
static bool const kInternalTranspose =
!cutlass::epilogue::threadblock::detail::is_2x_evt_v<typename GemmKernel::Epilogue> && // 2.x EVT does not require internal transpose
cute::is_same<typename GemmKernel::LayoutC, cutlass::layout::RowMajor>::value;
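  // When the kernel was composed with a row-major C layout (and its epilogue is not a 2.x EVT),
  // this adapter works on the transposed problem instead: the A/B operand types, layouts, and
  // complex transforms are exchanged through MapArguments below, and host arguments are lowered
  // via Arguments::transposed_problem() in to_underlying_arguments().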
using ThreadblockShape = typename GemmKernel::Mma::Shape;
using WarpShape = typename GemmKernel::WarpShape;
using InstructionShape = typename GemmKernel::InstructionShape;
// warp-level, arch-level (instruction), math operator
using WarpMmaOperator = typename GemmKernel::Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename WarpMmaOperator::MathOperator;
// Operator class and arch tag extract bottom-up
// set it for top-level gemm device-level template
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
// Type, layout, and complex transform deliberately exchanged with B
using MapArguments = kernel::detail::MapArguments<
typename GemmKernel::ElementA,
typename GemmKernel::LayoutA,
GemmKernel::kTransformA,
GemmKernel::kAlignmentA,
typename GemmKernel::ElementB,
typename GemmKernel::LayoutB,
GemmKernel::kTransformB,
GemmKernel::kAlignmentB,
typename GemmKernel::LayoutC,
kInternalTranspose
>;
using ElementA = typename MapArguments::ElementA;
using LayoutA = typename MapArguments::LayoutA;
static ComplexTransform const kTransformA = MapArguments::kTransformA;
static int const kAlignmentA = MapArguments::kAlignmentA;
using ElementB = typename MapArguments::ElementB;
using LayoutB = typename MapArguments::LayoutB;
static ComplexTransform const kTransformB = MapArguments::kTransformB;
static int const kAlignmentB = MapArguments::kAlignmentB;
using ElementC = typename GemmKernel::ElementC;
using LayoutC = typename MapArguments::LayoutC;
static int const kAlignmentC = GemmKernel::kAlignmentC;
// C and D same type for 2.x kernel
using ElementD = ElementC;
using LayoutD = LayoutC;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementD, LayoutD>;
static int const kStages = GemmKernel::Mma::kStages;
using EpilogueOutputOp = typename GemmKernel::EpilogueOutputOp;
using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator;
using ThreadblockSwizzle = typename GemmKernel::ThreadblockSwizzle;
using UnderlyingOperator = GemmUniversalBase<GemmKernel>;
using Arguments = typename UnderlyingOperator::Arguments;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmUniversalAdapter() { }
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
if (kInternalTranspose) {
return args.transposed_problem();
}
else {
return args;
}
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args), cuda_adapter);
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args, CudaHostAdapter *cuda_adapter = nullptr) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args), cuda_adapter);
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr
) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream, cuda_adapter);
}
/// Lightweight update given a subset of arguments.
Status update(Arguments const &args) {
return underlying_operator_.update(to_underlying_arguments(args));
}
/// Runs the kernel using initialized state.
Status run(
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return underlying_operator_.run(stream, cuda_adapter);
}
/// Runs the kernel using initialized state.
Status operator()(
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
Status status = initialize(args, workspace, stream, cuda_adapter);
if (status == Status::kSuccess) {
status = run(stream, cuda_adapter);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::device
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_universal_adapter.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_universal_adapter.h",
"repo_id": "include",
"token_count": 8505
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default kernel-level Blocked-Ell sparse gemm operators.
This operator combines threadblock-scoped ELL MMA
with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
#include "cutlass/gemm/kernel/ell_gemm.h"
#include "cutlass/gemm/threadblock/default_ell_mma.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm;
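////////////////////////////////////////////////////////////////////////////////
// Each partial specialization below exposes the composed kernel as
// ::GemmKernel. A minimal instantiation sketch follows; the concrete element
// types, tile shapes, epilogue, and swizzle are illustrative assumptions rather
// than a required configuration:
//
//   using Kernel = typename cutlass::gemm::kernel::DefaultEllGemm<
//       cutlass::half_t, cutlass::layout::RowMajor,    8,    // A: element, layout, alignment
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B: element, layout, alignment
//       cutlass::half_t, cutlass::layout::RowMajor,          // C/D: element, layout
//       float,                                                // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,               // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                 // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                  // instruction tile
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                    // pipeline stages
//       false,                                                // SplitKSerial
//       cutlass::arch::OpMultiplyAdd,
//       true                                                  // IsASparse (A holds the Blocked-ELL operand)
//   >::GemmKernel;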
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator, IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm<
ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC,
layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape,
InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages,
SplitKSerial, Operator, IsASparse> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages, Operator,
true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
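// The interleaved epilogue above derives its per-access vector width from the
// output element size as 64 / sizeof_bits<ElementC>::value. Worked examples
// (element types chosen only for illustration):
//   int8_t  output: 64 / 8  = 8 elements per access
//   int16_t output: 64 / 16 = 4 elements per access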
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
kAlignmentA, ElementB,
layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Volta architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
2,
Operator
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
layout::RowMajor,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
2,
Operator>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT on Ampere architecture (Sm80)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages
int Stages,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
layout::RowMajor,
ElementAccumulator,
arch::OpClassSimt,
arch::Sm80,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm80,
ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages,
Operator>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT DP4A
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Layout type for C matrix operand
typename LayoutC,
/// Element type for C and D matrix operands
typename ElementC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>,
EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial,
Operator, IsASparse> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Wmma Gemm Kernel
template <
    /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/default_ell_gemm.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_ell_gemm.h",
"repo_id": "include",
"token_count": 10017
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template for a Blocked-ELL sparse GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/arch/arch.h"
#include "cutlass/transform/threadblock/ell_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial, ///! If true, code supporting split-K via serial reduction is enabled.
bool IsASparse ///! If true, A is sparse matrix
>
struct EllGemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorA::TensorRef ref_A{};
typename Mma::IteratorB::Params params_B{};
typename Mma::IteratorB::TensorRef ref_B{};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::TensorRef ref_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename Epilogue::OutputTileIterator::TensorRef ref_D{};
typename OutputOp::Params output_op{};
int *semaphore = nullptr;
int gemm_k_iterations{0};
int gemm_k_size{0};
const int* ell_idx = nullptr;
int ell_ncol{0};
int ell_blocksize{0};
int ell_base_idx{0};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
const int* ell_idx,
int ell_ncol,
int ell_blocksize,
int ell_base_idx,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
ell_idx(ell_idx),
ell_ncol(ell_ncol),
ell_blocksize(ell_blocksize),
ell_base_idx(ell_base_idx)
{
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
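  // Worked example of the split-K partitioning computed in the Params
  // constructor above (the numbers are illustrative assumptions):
  //   problem_size.k() = 4096, Mma::Shape::kK = 32, grid_tiled_shape.k() = 2
  //   total_gemm_k_iterations = (4096 + 31) / 32  = 128
  //   gemm_k_iterations       = (128 + 2 - 1) / 2 = 64
  //   gemm_k_size             = 64 * 32           = 2048 columns of K per split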
/// Shared memory storage structure
struct SharedStorage {
union{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
typename cutlass::transform::threadblock::ell::SharedStorage ell;
};
//
// Methods
//
EllGemm() = default;
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
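  // Host-side usage sketch for the alignment check above; EllKernel, the
  // tensor refs, and the problem dimensions are placeholder names:
  //
  //   cutlass::gemm::GemmCoord problem(M, N, K);
  //   cutlass::Status status =
  //       EllKernel::can_implement(problem, ref_A, ref_B, ref_C, ref_D);
  //   if (status != cutlass::Status::kSuccess) {
  //     // fall back to a configuration with smaller alignment requirements
  //   }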
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kM - 1 ) / Mma::Shape::kM;
int ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block;
int tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block;
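    // Worked example of the block/tile mapping above (numbers are illustrative
    // assumptions): with ell_blocksize = 256 and Mma::Shape::kM = 128,
    // tile_in_ell_block = 2, so threadblock_tile_offset.m() = 5 maps to
    // ell_block_offset_m = 2 and tile_offset_m = 1.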
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
typename Mma::FragmentC accumulators;
accumulators.clear();
// skip computation if matrix is 0
if (params.ell_ncol > 0) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
ell_block_offset_m * params.ell_blocksize
+ tile_offset_m * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
int ell_idx_start =
(threadblock_tile_offset.m() / tile_in_ell_block) *
(params.ell_ncol / params.ell_blocksize);
const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]);
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
problem_size_k = min(problem_size_k, params.ell_ncol);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations =
(problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Define coef for ELL index depending on LayoutB
int ell_stride = iterator_B.get_stride();
typename cutlass::transform::threadblock::ell::Iterator ell_iterator(
shared_storage.ell,
ell_idx_ptr,
params.ell_blocksize,
params.ell_base_idx,
Mma::Shape::kK,
problem_size_k,
ell_stride,
thread_idx
);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
if (!kSplitKSerial || gemm_k_iterations > 0) {
// check if index computations can be skipped
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8);
constexpr bool is_multiple_alignment =
(kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1);
const bool is_specialized_blocksize =
((params.ell_blocksize) & (params.ell_blocksize-1)) == 0
&& params.ell_blocksize >= Mma::Shape::kK;
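        // The (x & (x - 1)) == 0 test above is the standard power-of-two check.
        // Illustration (Mma::Shape::kK = 32 assumed):
        //   ell_blocksize = 64: 64 & 63 == 0 and 64 >= 32  -> specialized path
        //   ell_blocksize = 96: 96 & 95 == 64 (nonzero)    -> generic path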
// Compute threadblock-scoped matrix multiply-add
if ((is_double || is_multiple_alignment) && is_specialized_blocksize) {
mma.operator()<true, true>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
else {
mma.operator()<true, false>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
}
    } // if (params.ell_ncol > 0)
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block;
tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block;
//assume identity swizzle
MatrixCoord threadblock_offset(
ell_block_offset_m * params.ell_blocksize
+ tile_offset_m * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
//avoid out of bounds
MatrixCoord threadblock_extent(
min(params.problem_size.m(),
ell_block_offset_m * params.ell_blocksize
+ min((tile_offset_m + 1) * Mma::Shape::kM, params.ell_blocksize)),
min(params.problem_size.n(),
(threadblock_tile_offset.n()+1) * Mma::Shape::kN)
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
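////////////////////////////////////////////////////////////////////////////////
// Launch sketch for the kernel above. The Kernel alias, grid computation, and
// argument values are illustrative assumptions; in practice a device-level
// wrapper performs these steps:
//
//   using Kernel = typename DefaultEllGemm< /* ... */ >::GemmKernel;
//   typename Kernel::Params params(
//       problem_size, grid_tiled_shape,
//       ref_A, ref_B, ref_C, ref_D,
//       ell_idx, ell_ncol, ell_blocksize, ell_base_idx,
//       {alpha, beta}, workspace);
//   dim3 grid = ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
//   dim3 block(Kernel::kThreadCount, 1, 1);
//   int smem_size = int(sizeof(typename Kernel::SharedStorage));
//   cutlass::Kernel<Kernel><<<grid, block, smem_size>>>(params);
////////////////////////////////////////////////////////////////////////////////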
/// Partial specialization for the case where B is the sparse operand (IsASparse == false)
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct EllGemm<Mma_, Epilogue_, ThreadblockSwizzle_, SplitKSerial, false> {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorA::TensorRef ref_A{};
typename Mma::IteratorB::Params params_B{};
typename Mma::IteratorB::TensorRef ref_B{};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::TensorRef ref_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename Epilogue::OutputTileIterator::TensorRef ref_D{};
typename OutputOp::Params output_op{};
int *semaphore = nullptr;
int gemm_k_iterations{0};
int gemm_k_size{0};
const int* ell_idx = nullptr;
int ell_ncol{0};
int ell_blocksize{0};
int ell_base_idx{0};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
const int* ell_idx,
int ell_ncol,
int ell_blocksize,
int ell_base_idx,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
ell_idx(ell_idx),
ell_ncol(ell_ncol),
ell_blocksize(ell_blocksize),
ell_base_idx(ell_base_idx)
{
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
/// Shared memory storage structure
struct SharedStorage {
union{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
typename cutlass::transform::threadblock::ell::SharedStorage ell;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
EllGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kN - 1 ) / Mma::Shape::kN;
int ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block;
int tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
typename Mma::FragmentC accumulators;
accumulators.clear();
// skip computation if matrix is 0
if (params.ell_ncol > 0) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
ell_block_offset_n * params.ell_blocksize
+ tile_offset_n * Mma::Shape::kN,
};
int ell_idx_start =
(threadblock_tile_offset.n() / tile_in_ell_block) *
(params.ell_ncol / params.ell_blocksize);
const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]);
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
problem_size_k = min(problem_size_k, params.ell_ncol);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations =
(problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Define coef for ELL index depending on LayoutA
int ell_stride = iterator_A.get_stride();
typename cutlass::transform::threadblock::ell::Iterator ell_iterator(
shared_storage.ell,
ell_idx_ptr,
params.ell_blocksize,
params.ell_base_idx,
Mma::Shape::kK,
problem_size_k,
ell_stride,
thread_idx
);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
if (!kSplitKSerial || gemm_k_iterations > 0) {
// check if index computations can be skipped
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8);
constexpr bool is_multiple_alignment =
(kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1);
const bool is_specialized_blocksize =
((params.ell_blocksize) & (params.ell_blocksize-1)) == 0
&& params.ell_blocksize >= Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
if ((is_double || is_multiple_alignment) && is_specialized_blocksize) {
mma.operator()<false, true>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
else {
mma.operator()<false, false>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
}
    } // if (params.ell_ncol > 0)
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block;
tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block;
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
ell_block_offset_n * params.ell_blocksize
+ tile_offset_n * Mma::Shape::kN
);
//avoid out of bounds
MatrixCoord threadblock_extent(
min(params.problem_size.m(),
(threadblock_tile_offset.m()+1) * Mma::Shape::kM),
min(params.problem_size.n(),
ell_block_offset_n * params.ell_blocksize
+ min((tile_offset_n + 1) * Mma::Shape::kN, params.ell_blocksize))
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/ell_gemm.h/0 | {
"file_path": "include/cutlass/gemm/kernel/ell_gemm.h",
"repo_id": "include",
"token_count": 12123
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template for a universal kernel-level rank-k update operator.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
FillMode FillModeC_ ///! Fill Mode for C (kLower or kUpper)
>
struct RankKUniversal {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static FillMode const kFillModeC = FillModeC_;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = 128 / sizeof_bits<ElementA>::value;
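  // Worked examples of the 128-bit split-K alignment above:
  //   ElementA = cutlass::half_t -> 128 / 16 = 8-element alignment
  //   ElementA = float           -> 128 / 32 = 4-element alignment
  //   ElementA = double          -> 128 / 64 = 2-element alignment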
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode{GemmUniversalMode::kGemm};
GemmCoord problem_size{};
int batch_count{1};
typename EpilogueOutputOp::Params epilogue{};
void const * ptr_A{nullptr};
void const * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
typename LayoutA::Stride::Index lda{};
typename LayoutB::Stride::Index ldb{};
typename LayoutC::Stride::Index ldc{};
typename LayoutC::Stride::Index ldd{};
bool allow_early_exit{false};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
bool allow_early_exit = false
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(0),
ldc(ldc), ldd(ldd),
allow_early_exit(allow_early_exit) {
}
};
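  // Construction sketch for the Arguments above; pointer, leading-dimension,
  // and scalar names are placeholders. A rank-k update reads a single operand
  // A (N-by-K) and updates the N-by-N output C/D:
  //
  //   Arguments args(
  //       cutlass::gemm::GemmUniversalMode::kGemm,
  //       {N, N, K},          // problem size
  //       1,                  // batch count / split-K slices
  //       {alpha, beta},      // epilogue parameters
  //       device_A, device_C, device_D,
  //       0, 0, 0,            // batch strides (single problem)
  //       lda, ldc, ldd);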
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size{};
cutlass::gemm::GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
typename Mma::IteratorA::Params params_A{};
typename Mma::IteratorB::Params params_B{};
typename Epilogue::OutputTileIterator::Params params_C{};
typename Epilogue::OutputTileIterator::Params params_D{};
typename EpilogueOutputOp::Params output_op{};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count{0};
int gemm_k_size{0};
void * ptr_A{nullptr};
void * ptr_B{nullptr};
void * ptr_C{nullptr};
void * ptr_D{nullptr};
int64_t batch_stride_A{0};
int64_t batch_stride_B{0};
int64_t batch_stride_C{0};
int64_t batch_stride_D{0};
int *semaphore{nullptr};
bool allow_early_exit{false};
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_B(args.lda),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_A)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_A),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)),
allow_early_exit(args.allow_early_exit) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_A);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
RankKUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit following LAPACK's definition
if (params.allow_early_exit &&
(params.output_op.alpha == ElementC(0)) && (params.output_op.beta == ElementC(1))) {
return;
}
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Early exit if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma::Shape::kM <= threadblock_tile_offset.n() * Mma::Shape::kN) {
return;
}
// Early exit if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) {
return;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM > threadblock_tile_offset.n() * Mma::Shape::kN
&& threadblock_tile_offset.m() * Mma::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) {
tile_on_diagonal = true;
}
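// Worked example (illustrative, not part of the original source): assume
// Mma::Shape::kM == Mma::Shape::kN == 128 and a CTA at tile offset (m, n) == (1, 3),
// covering rows [128, 256) and columns [384, 512).
//   kLower early exit: (1 + 1) * 128 = 256 <= 3 * 128 = 384, so the entire tile lies above
//   the diagonal and a lower-fill kernel returns immediately.
//   A tile at (3, 3) instead gives 4 * 128 = 512 > 384 and 3 * 128 = 384 < 512, so both
//   crossing conditions hold and tile_on_diagonal becomes true.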
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
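// Illustrative note (not part of the original source): in kBatched mode the CTA's k()
// coordinate selects a batch, so with batch_stride_A == 4096 the third batch
// (threadblock_tile_offset.k() == 2) reads A starting 2 * 4096 elements past ptr_A.
// In kArray mode, params.ptr_A instead points to an array of per-batch pointers and the
// same coordinate indexes into that array.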
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
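// Split-K serial protocol, summarized (illustrative comment, not part of the original source;
// assumes the semaphore workspace was zero-initialized by the host). With grid_tiled_shape.k() == 4,
// the four CTAs sharing one output tile form a chain:
//   k == 0 waits for lock value 0, sources from C, writes D, then releases lock value 1;
//   k == 1 and k == 2 wait for lock values 1 and 2, source from D (iterator_C = iterator_D above),
//   accumulate, and release k + 1;
//   k == 3, the final partition, releases 0 so the semaphore is reset for a subsequent launch
//   that reuses the same workspace.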
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/rank_k_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/rank_k_universal.h",
"repo_id": "include",
"token_count": 6750
} | 39 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/fast_math.h"
#include "cutlass/gemm_coord.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/kernel/tile_scheduler_params.h"
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/pipeline/pipeline.hpp"
namespace cutlass::gemm::kernel::detail {
///////////////////////////////////////////////////////////////////////////////
// Users are not supposed to use this class directly.
// This is a CRTP base class for the actual tile schedulers.
template<class Subclass>
class StaticPersistentTileScheduler {
//
// Data members
//
private:
uint64_t current_work_linear_idx_;
uint64_t total_grid_size_;
public:
struct WorkTileInfo {
int32_t M_idx = 0;
int32_t N_idx = 0;
int32_t L_idx = 0;
bool is_valid_tile = false;
CUTLASS_HOST_DEVICE
bool
is_valid() const {
return is_valid_tile;
}
CUTLASS_HOST_DEVICE
static WorkTileInfo
invalid_work_tile() {
return {-1, -1, -1, false};
}
CUTLASS_HOST_DEVICE
bool
is_final_split(uint32_t k_tiles_per_output_tile) const {
return true;
}
CUTLASS_HOST_DEVICE
int32_t
reduction_subtile_idx() const {
return -1;
}
};
using Params = PersistentTileSchedulerSm90Params;
using RasterOrder = typename Params::RasterOrder;
using RasterOrderOptions = typename Params::RasterOrderOptions;
public:
struct Arguments {
int max_swizzle_size = 1;
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic;
};
template <class ProblemShapeMNKL, class TileShape, class ClusterShape>
static Params
to_underlying_arguments(
ProblemShapeMNKL problem_shape_mnkl,
TileShape tile_shape,
ClusterShape cluster_shape,
[[maybe_unused]] KernelHardwareInfo const& hw_info,
Arguments const& arguments,
[[maybe_unused]] void* workspace=nullptr,
[[maybe_unused]] const uint32_t epilogue_subtile = 1) {
// We only need the tile and cluster shape during scheduler setup, so let function template argument deduction (FTAD) do the magic
static_assert(cute::is_static<TileShape>::value);
static_assert(cute::is_static<ClusterShape>::value);
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape);
Params params;
params.initialize(
problem_blocks,
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order
);
return params;
}
CUTLASS_HOST_DEVICE
static bool
can_implement(Arguments const& args) {
return true;
}
CUTLASS_HOST_DEVICE
StaticPersistentTileScheduler() { }
CUTLASS_DEVICE explicit StaticPersistentTileScheduler(Params const& params_) : scheduler_params(params_) {
// MSVC requires protecting use of CUDA-specific nonstandard syntax,
// like blockIdx and gridDim, with __CUDA_ARCH__.
#if defined(__CUDA_ARCH__)
if (params_.raster_order_ == RasterOrder::AlongN) {
current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x);
}
else {
current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y);
}
total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z);
#else
CUTLASS_ASSERT(false && "This line should never be reached");
#endif
}
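// Worked example (illustrative, not part of the original source): for a CTA at
// blockIdx == (3, 2) in a grid of gridDim == (8, 4, 1),
//   RasterOrder::AlongN gives current_work_linear_idx_ = 3 + 2 * 8 = 19, and
//   RasterOrder::AlongM gives current_work_linear_idx_ = 3 * 4 + 2 = 14.
// total_grid_size_ is 8 * 4 * 1 = 32 in both cases, which is the stride applied by
// advance_to_next_work() below.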
// Returns the initial work tile info that will be computed over
template <class ClusterShape>
CUTLASS_DEVICE
WorkTileInfo
initial_work_tile_info(ClusterShape cluster_shape) {
return get_current_work();
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work() const {
return get_current_work_for_linear_idx(current_work_linear_idx_);
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work_for_linear_idx(uint64_t linear_idx) const {
if (linear_idx >= scheduler_params.blocks_per_problem_) {
return WorkTileInfo::invalid_work_tile();
}
// Map the worker's linear index over the CTA-tiled problem shape to the corresponding (M, N, L) indices
uint64_t work_idx_l, remainder;
scheduler_params.divmod_batch_(work_idx_l, remainder, linear_idx);
uint64_t blk_per_grid_dim = scheduler_params.divmod_cluster_shape_minor_.divide(remainder);
auto [work_idx_m, work_idx_n] = Subclass::get_work_idx_m_and_n(blk_per_grid_dim,
scheduler_params.divmod_cluster_shape_major_,
scheduler_params.divmod_cluster_shape_minor_,
scheduler_params.divmod_cluster_blk_major_,
scheduler_params.log_swizzle_size_,
scheduler_params.raster_order_);
return {work_idx_m, work_idx_n, static_cast<int32_t>(work_idx_l), true};
}
CUTLASS_DEVICE
void
advance_to_next_work(uint32_t advance_count = 1) {
current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count);
}
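// Usage sketch (illustrative, not part of the original source): a persistent kernel built on a
// subclass of this scheduler typically loops along these lines, where process(...) stands in for
// the caller's mainloop and epilogue:
//
//   Scheduler scheduler(params);
//   auto work = scheduler.initial_work_tile_info(ClusterShape{});
//   while (work.is_valid()) {
//     process(work);
//     scheduler.advance_to_next_work();
//     work = scheduler.get_current_work();
//   }
//
// Because advance_to_next_work() steps by the full grid size, successive iterations of the loop
// visit tiles that are exactly one wave apart.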
// Computes the linear index within a batch given M and N tile offsets within the batch.
// This essentially inverts the mapping performed in get_work_idx_m_and_n
static CUTLASS_DEVICE
uint64_t
get_linear_idx_from_m_and_n(
int32_t tile_m,
int32_t tile_n,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cluster_blk_major,
int32_t log_swizzle_size,
RasterOrder raster_order) {
auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
uint64_t minor_work_idx, major_work_idx, cluster_minor_offset;
if (raster_order == RasterOrder::AlongN) {
minor_work_idx = static_cast<uint64_t>(tile_m);
major_work_idx = static_cast<uint64_t>(tile_n);
cluster_minor_offset = cta_m_in_cluster;
}
else {
major_work_idx = static_cast<uint64_t>(tile_m);
minor_work_idx = static_cast<uint64_t>(tile_n);
cluster_minor_offset = cta_n_in_cluster;
}
uint64_t cluster_idx_minor, cluster_idx_major, cluster_major_offset;
cluster_idx_minor = divmod_cluster_shape_minor.divide(minor_work_idx - cluster_minor_offset);
divmod_cluster_shape_major(cluster_idx_major, cluster_major_offset, major_work_idx);
uint64_t cluster_idx_minor_div_swizzle = cluster_idx_minor >> log_swizzle_size;
uint64_t offset = cluster_idx_minor & ((1 << log_swizzle_size) - 1);
uint64_t extra = cluster_idx_minor_div_swizzle * divmod_cluster_blk_major.divisor + cluster_idx_major;
uint64_t cluster_id = (extra << log_swizzle_size) | offset;
return (cluster_id * divmod_cluster_shape_major.divisor + cluster_major_offset) * divmod_cluster_shape_minor.divisor + cluster_minor_offset;
}
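// Worked example (illustrative, not part of the original source): assume a 1x1 cluster,
// log_swizzle_size == 0, RasterOrder::AlongN, and divmod_cluster_blk_major.divisor == 8
// (eight cluster blocks along N). For (tile_m, tile_n) == (5, 7):
//   minor_work_idx = 5, major_work_idx = 7, both cluster offsets are 0,
//   extra = 5 * 8 + 7 = 47, cluster_id = 47, and the returned linear index is 47,
// that is, a plain row-major linearization over (M tiles, N tiles) within one batch.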
// Given the inputs, computes the total number of output blocks over which this problem will compute.
// Note that this is only the logical size of our grid, not the physical grid we will actually launch.
template<class ProblemShapeMNKL, class BlockShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_tiled_cta_shape_mnl(ProblemShapeMNKL problem_shape_mnkl, BlockShape cta_shape, ClusterShape cluster_shape) {
auto cta_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shape_mnkl), cute::shape<0>(cta_shape)));
auto cta_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shape_mnkl), cute::shape<1>(cta_shape)));
return Params::get_tiled_cta_shape_mnl(
to_gemm_coord(problem_shape_mnkl),
to_gemm_coord(cluster_shape),
cta_m, cta_n
);
}
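// Worked example (illustrative, not part of the original source): for a problem extent of
// (M, N) == (300, 500) and a CTA tile of (128, 128), the ceil-divisions above give cta_m == 3
// and cta_n == 4; Params::get_tiled_cta_shape_mnl then rounds these block counts up to whole
// clusters and carries the batch extent L in the returned dim3's z component.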
// Kernel helper function to get next work ID
template <class WorkIdPipeline, class WorkIdPipelineState>
CUTLASS_DEVICE
auto
fetch_next_work(
WorkTileInfo work_tile_info,
WorkIdPipeline& work_id_pipeline,
WorkIdPipelineState work_id_pipe_consumer_state) {
WorkTileInfo new_work_tile_info;
advance_to_next_work();
new_work_tile_info = get_current_work();
// Return true to indicate that the WorkID pipeline state should be advanced
return cute::make_tuple(new_work_tile_info, true);
}
CUTLASS_DEVICE
static auto
work_tile_to_cta_coord(WorkTileInfo work_tile_info) {
// Get every cta coord in three dimensions of the cluster
auto [cta_m_in_cluster, cta_n_in_cluster, cta_l_in_cluster] = cute::block_id_in_cluster();
return make_coord(
work_tile_info.M_idx + static_cast<int32_t>(cta_m_in_cluster),
work_tile_info.N_idx + static_cast<int32_t>(cta_n_in_cluster),
_,
work_tile_info.L_idx + static_cast<int32_t>(cta_l_in_cluster)
);
}
// Given the inputs, computes the physical grid we should launch.
template<class ProblemShapeMNKL, class BlockShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_grid_shape(
ProblemShapeMNKL problem_shape_mnk,
BlockShape cta_shape,
ClusterShape cluster_shape,
KernelHardwareInfo hw_info,
Arguments arguments,
bool truncate_by_problem_size=true) {
auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{});
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape);
return Params::get_grid_shape(
problem_blocks,
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order,
/* truncate_by_problem_size = */true
);
}
// Given the inputs, computes the physical grid we should launch.
template<class ProblemShapeMNKL, class BlockShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_grid_shape(
Params const& params,
ProblemShapeMNKL problem_shape_mnk,
BlockShape cta_shape,
ClusterShape cluster_shape,
KernelHardwareInfo hw_info) {
auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{});
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape);
Arguments args{};
if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) {
args.max_swizzle_size = 1 << params.log_swizzle_size_;
}
args.raster_order = params.raster_order_ == RasterOrder::AlongN ? RasterOrderOptions::AlongN : RasterOrderOptions::AlongM;
return Params::get_grid_shape(
problem_blocks,
to_gemm_coord(cluster_shape),
hw_info,
args.max_swizzle_size,
args.raster_order,
/* truncate_by_problem_size = */true
);
}
// Convert CTA-level work tile info to cluster-level tile coord
CUTLASS_DEVICE
auto
work_tile_to_cluster_coord_mnkl(WorkTileInfo work_tile_info) const {
// TileScheduler works at CTA-level, kernel works at cluster-level
int m_coord = idx2crd(work_tile_info.M_idx / scheduler_params.cluster_shape_m_,
scheduler_params.problem_tiles_m_);
int n_coord = idx2crd(work_tile_info.N_idx / scheduler_params.cluster_shape_n_,
scheduler_params.problem_tiles_n_);
int l_coord = idx2crd(work_tile_info.L_idx,
scheduler_params.problem_tiles_l_);
return make_coord(m_coord, n_coord, _, l_coord);
}
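// Worked example (illustrative, not part of the original source): with a 2x1 cluster
// (cluster_shape_m_ == 2, cluster_shape_n_ == 1), a CTA-level work tile at
// (M_idx, N_idx, L_idx) == (5, 3, 0) maps to the cluster-level coordinate
// (5 / 2, 3 / 1, _, 0) == (2, 3, _, 0), i.e. integer division by the cluster shape.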
// Returns whether the block assigned this work should compute the epilogue for the corresponding
// output tile. For the basic tile scheduler, this is always true.
CUTLASS_HOST_DEVICE
static bool
compute_epilogue(WorkTileInfo const&, Params const&) {
return true;
}
CUTLASS_HOST_DEVICE
static bool
compute_epilogue(WorkTileInfo const&) {
return true;
}
// Performs the reduction across splits for a given output tile. Since this scheduler does
// not split output tiles, no reduction is needed.
template <class FrgTensorC>
CUTLASS_DEVICE
static void
fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {}
// Performs the reduction across splits for a given output tile. No fixup is required for
// work units returned by this scheduler.
template <class FrgTensorC>
CUTLASS_DEVICE
void
fixup(WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) const { }
// Returns whether the current WorkTileInfo passed in should continue to be used. Since
// this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo
// passed in should not be used after having been processed.
CUTLASS_DEVICE
static bool
continue_current_work(WorkTileInfo&) {
return false;
}
template <class ProblemShape, class TileShape>
CUTLASS_HOST_DEVICE
static int
get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape problem_shape, TileShape tile_shape) {
// All work units returned by this scheduler cover the entire K iteration
// space of the output tile assigned to the work unit.
return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape)));
}
CUTLASS_HOST_DEVICE
static uint32_t
get_work_k_tile_start(WorkTileInfo const&) {
// All work units returned by this scheduler start from K tile 0
return 0u;
}
CUTLASS_DEVICE
static bool
need_separate_reduction(Params const& params) {
return false;
}
CUTLASS_DEVICE
bool
is_work_tile_for_reduction(WorkTileInfo const& work_tile_info, Params const& params) {
return false;
}
template <class FrgTensorC>
CUTLASS_DEVICE
void
separate_reduction(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
}
// Shares the accumulator set with peers in the global workspace
template <class FrgTensorC>
CUTLASS_DEVICE
static void
share(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
}
CUTLASS_DEVICE
static bool
valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) {
return true;
}
CUTLASS_DEVICE
static bool
requires_separate_reduction(Params const& params) {
return false;
}
public:
// Sink scheduler params as a member
Params scheduler_params;
};
} // namespace cutlass::gemm::kernel::detail
| include/cutlass/gemm/kernel/static_tile_scheduler.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/static_tile_scheduler.hpp",
"repo_id": "include",
"token_count": 6191
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
The SM80 multistage kernel expects the stage count to be greater than or equal to 3
in order to use asynchronous copy.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::ColumnMajor, double, layout::ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::ColumnMajor;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
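// Illustrative sketch (not part of the original source): one way the column-major x column-major
// double-precision specialization above might be instantiated. The tile sizes and the
// arch::OpMultiplyAdd operator tag are assumptions chosen only to satisfy the static_asserts
// (divisible tiles, at least two warps), not a recommended configuration:
//
//   using ExampleCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,   // warp tile (2x2 warps per threadblock)
//       cutlass::gemm::GemmShape<8, 8, 4>,      // SM80 FP64 tensor op instruction shape
//       double, cutlass::layout::ColumnMajor,   // A
//       double, cutlass::layout::ColumnMajor,   // B
//       double, cutlass::layout::RowMajor,      // C
//       cutlass::arch::OpClassTensorOp,
//       3,                                      // stages (>= 3 for asynchronous copy)
//       cutlass::arch::OpMultiplyAdd>;
//
// The nested ExampleCore::MmaPolicy and shared-memory iterators are what the threadblock-scoped
// multistage mainloop consumes.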
/// Partial specialization for double-precision
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::ColumnMajor, double, layout::RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::ColumnMajor;
using ElementB = double;
using LayoutB = layout::RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::RowMajor, double, layout::ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::RowMajor;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for double-precision
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::RowMajor, double, layout::RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::RowMajor;
using ElementB = double;
using LayoutB = layout::RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2ColumnMajor, double, layout::AffineRank2ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for double-precision
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2ColumnMajor, double, layout::AffineRank2RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2RowMajor, double, layout::AffineRank2ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for double-precision
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2RowMajor, double, layout::AffineRank2RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for single-precision complex
///
/// ElementA: complex<float>
/// ElementB: complex<float>
/// ElementC: complex<float>
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout for A operand
typename LayoutA_,
/// Layout for B operand
typename LayoutB_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA_,
/// per-element transformation for elements of B
ComplexTransform TransformB_
>
struct DefaultMmaCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, LayoutA_,
complex<float>, LayoutB_,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
Operator_,
false,
CacheOpA,
CacheOpB,
TransformA_, TransformB_, true> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = LayoutA_;
using ElementB = complex<float>;
using LayoutB = LayoutB_;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
static const ComplexTransform TransformA = TransformA_;
static const ComplexTransform TransformB = TransformB_;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
static_assert(
platform::is_same<Operator, arch::OpMultiplyAddComplex>::value ||
platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value ||
platform::is_same<Operator, arch::OpMultiplyAddComplexFastF32>::value,
"The operator tag must indicate complex multiplication.");
//
// Underlying template
//
using MmaComplexCore = DefaultMultistageMmaComplexCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
arch::OpClassTensorOp,
kStages,
TransformA,
TransformB,
Operator,
kCacheOpA,
kCacheOpB
>;
//
// Shared memory layouts
//
using SmemLayoutA = typename MmaComplexCore::SmemLayoutA;
// Shared memory layout
using SmemLayoutB = typename MmaComplexCore::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename MmaComplexCore::SmemIteratorA;
/// ThreadMap of iterator B
using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename MmaComplexCore::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename MmaComplexCore::MmaTensorOp;
/// Policy used to define MmaPipelined
using MmaPolicy = typename MmaComplexCore::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// ElementA: complex<double>
/// ElementB: complex<double>
/// ElementC: complex<double>
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout for A operand
typename LayoutA_,
/// Layout for B operand
typename LayoutB_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA_,
/// per-element transformation for elements of B
ComplexTransform TransformB_
>
struct DefaultMmaCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, LayoutA_,
complex<double>, LayoutB_,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
Operator_,
false,
CacheOpA,
CacheOpB,
TransformA_, TransformB_, true> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = LayoutA_;
using ElementB = complex<double>;
using LayoutB = LayoutB_;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
static const ComplexTransform TransformA = TransformA_;
static const ComplexTransform TransformB = TransformB_;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
static_assert(
platform::is_same<Operator, arch::OpMultiplyAddComplex>::value ||
platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value,
"The operator tag must indicate complex multiplication.");
//
// Underlying template
//
using MmaComplexCore = DefaultMultistageMmaComplexCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
arch::OpClassTensorOp,
kStages,
TransformA,
TransformB,
Operator,
kCacheOpA,
kCacheOpB
>;
//
// Shared memory layouts
//
using SmemLayoutA = typename MmaComplexCore::SmemLayoutA;
// Shared memory layout
using SmemLayoutB = typename MmaComplexCore::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename MmaComplexCore::SmemIteratorA;
/// ThreadMap of iterator B
using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename MmaComplexCore::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename MmaComplexCore::MmaTensorOp;
/// Policy used to define MmaPipelined
using MmaPolicy = typename MmaComplexCore::MmaPolicy;
};
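// Illustrative sketch: one way the complex<double> specialization above could
// be instantiated. The tile shapes, stage count, instruction shape, and layout
// choices below are assumptions picked only to show the argument order, not a
// validated configuration.
//
//   using MmaCoreZ = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,    // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,    // warp tile
//       cutlass::gemm::GemmShape<8, 8, 4>,       // instruction shape (assumed f64 tensor op)
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       3,                                       // stages
//       cutlass::arch::OpMultiplyAddComplex,
//       false,                                   // AccumulatorsInRowMajor
//       cutlass::arch::CacheOperation::Always,
//       cutlass::arch::CacheOperation::Always,
//       cutlass::ComplexTransform::kNone,        // transform on A
//       cutlass::ComplexTransform::kNone,        // transform on B
//       true>;                                   // trailing flag matched by this specialization
//
// The resulting WarpCount is 2 x 2 x 1, which satisfies the "at least two
// warps" static_assert above.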
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)),
Shape::kM);
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, Crosswise_A>;
// Shared memory layout
static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)),
Shape::kN);
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, Crosswise_B>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
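// Worked example for the congruous specialization above, assuming a
// hypothetical ElementA = ElementB = cutlass::half_t (16 bits) and
// Shape = GemmShape<128, 128, 32>:
//
//   elements per 128-bit access        = 128 / 16          = 8
//   kWarpThreadArrangementContiguousA  = min(128 / 8, 8)   = 8
//   kWarpThreadArrangementStridedA     = 32 / 8            = 4
//   Crosswise_A                        = min(128 / 2, 128) = 64
//
// i.e. each warp is arranged 8 x 4 over the (contiguous, strided) dimensions
// of the M-contiguous pitch-linear tile, and the congruous shared memory
// layout uses a 64-element crosswise factor. The B operand works out the same
// way with N in place of M.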
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
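// Illustrative sketch: a hypothetical instantiation of the crosswise
// (A row-major, B column-major) specialization above. Element types, tile
// shapes, and stage count are assumptions for illustration only.
//
//   using MmaCoreTN = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,    // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,     // instruction shape (assumed f16 tensor op)
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       4,                                       // stages
//       cutlass::arch::OpMultiplyAdd,
//       false,                                   // AccumulatorsInRowMajor
//       cutlass::arch::CacheOperation::Always,
//       cutlass::arch::CacheOperation::Always>;
//
// With these numbers kWarpThreadArrangementContiguousA = 32 / (128 / 16) = 4
// and kWarpThreadArrangementStridedA = 32 / 4 = 8, so each warp rakes the
// K-contiguous tile with 4 threads along K and 8 threads along M.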
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
      Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)),
Shape::kM);
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, Crosswise_A>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
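// Note on the mixed arrangement above: A is staged congruously (contiguous in
// M) while B is staged crosswise (contiguous in K). Assuming half_t operands
// and Shape = GemmShape<128, 128, 32>, the warp arrangement is
// min(128 / 8, 8) x 4 = 8 x 4 for A and (32 / 8) x 8 = 4 x 8 for B, combining
// the two pure cases shown earlier.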
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)),
Shape::kN);
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, Crosswise_B>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major-interleaved
/// B: row-major-interleaved
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
///
/// Column/RowMajorInterleaved<InterleavedK>(m, n) is mapped to Column/RowMajor(m
/// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators
/// can be reused. The shared store iterator is the same as the crosswise shared
/// store iterator. So, the only thing we need to do is to swap the coordinates
/// (contiguous <=> strided) used by the global iterator and the shared store
/// iterator.
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Number of interleaved K
int InterleavedK>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajorInterleaved<InterleavedK>, ElementB_,
layout::RowMajorInterleaved<InterleavedK>, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
AccumulatorsInRowMajor, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementB = ElementB_;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kInterleavedK = InterleavedK;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementA>::value;
static int const kWarpThreadArrangementContiguous =
kInterleavedK / kElementsPerAccess;
static int const kWarpThreadArrangementStrided =
kWarpSize / kWarpThreadArrangementContiguous;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, kInterleavedK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kInterleavedK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap<
IteratorThreadMapA,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap<
IteratorThreadMapB,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
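// Worked example of the remapping described above, assuming a hypothetical
// ColumnMajorInterleaved<32> A operand with int8_t elements and
// Shape::kM x Shape::kK = 64 x 64. The global iterator sees a pitch-linear
// tile of (Shape::kM * kInterleavedK) x (Shape::kK / kInterleavedK) = 2048 x 2,
// so the plain ColumnMajor global iterator is reused unchanged. On the shared
// memory side, kElementsPerAccess = 128 / 8 = 16, giving
//   kWarpThreadArrangementContiguous = 32 / 16 = 2,
//   kWarpThreadArrangementStrided    = 32 / 2  = 16,
// and TransposePitchLinearThreadMap swaps the (contiguous, strided) coordinates
// between the global load and the crosswise shared store.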
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static_assert(!((Shape::kK / 32) % LaneN),
"Padding must be divisible by Lane");
  // LaneM / LaneN are additionally capped by the thread tile dimensions above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
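// Worked example of the lane arithmetic above, assuming float operands and a
// hypothetical WarpShape of 64 x 32:
//
//   ThreadTileM = 64 / 4 = 16,  ThreadTileN = 32 / 8 = 4
//   LaneLayout  = 1             (ThreadTileN is not greater than 4)
//   numElementsA = numElementsB = 128 / 32 = 4
//   LaneM = min(4, 16) = 4,     LaneN = min(4, 4) = 4
//
// giving LaneMmaShape = GemmShape<4, 4, 1>. With a typical SIMT threadblock K
// of 8, the B-operand padding term Shape::kK / 32 evaluates to 0, so the
// divisibility static_assert is satisfied trivially.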
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // LaneM / LaneN are additionally capped by the thread tile dimensions above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK>;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static_assert(!((Shape::kK / 32) % LaneM) && !((Shape::kK / 32) % LaneN),
"Padding must be divisible by Lane");
  // LaneM / LaneN are additionally capped by the thread tile dimensions above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static_assert(!((Shape::kK / 32) % LaneM),
"Padding must be divisible by Lane");
  // LaneM / LaneN are additionally capped by the thread tile dimensions above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, 0>,
WarpCount::kK>;
};
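// Note on the four SIMT layout combinations above: shared memory padding is
// applied only to operands whose global layout is K-contiguous and must
// therefore be transposed while being staged (B in the column/column case, A
// in the row/row case, both in the row/column case, neither in the column/row
// case). The Shape::kK / 32 padding appears intended to stagger strips and
// avoid shared memory bank conflicts, and the static_asserts require it to be
// a multiple of the corresponding lane MMA dimension.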
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
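// Illustrative sketch: the affine rank-2 specializations above forward every
// nested type from the corresponding canonical ColumnMajor/RowMajor core, so a
// hypothetical instantiation differs from the non-affine case only in the
// layout tags (shapes and element types below are assumptions):
//
//   using MmaCoreAffine = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 8>,
//       cutlass::gemm::GemmShape<32, 64, 8>,
//       cutlass::gemm::GemmShape<1, 1, 1>,
//       float, cutlass::layout::AffineRank2ColumnMajor,
//       float, cutlass::layout::AffineRank2RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassSimt,
//       3,
//       cutlass::arch::OpMultiplyAdd,
//       false,
//       cutlass::arch::CacheOperation::Always,
//       cutlass::arch::CacheOperation::Always>;
//
//   static_assert(cutlass::platform::is_same<MmaCoreAffine::SmemLayoutA,
//                 cutlass::layout::ColumnMajor>::value,
//                 "Shared memory staging reuses the canonical layouts.");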
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_sm80.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_sm80.h",
"repo_id": "include",
"token_count": 38063
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped Blocked-Ell MMA.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute a threadblock-scoped matrix product for Blocked-Ell sparse
/// GEMM using a multistage (cp.async) software pipeline.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages
int Stages,
/// Used for partial specialization
typename Enable = bool>
class EllMmaMultistage :
public MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
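// Worked example of the ceil-division above, using hypothetical values: if
// operand A requires AsyncCopyIterationsPerStageA = 4 cp.async issues per stage
// and Base::kWarpGemmIterations = 3, then
//   kAccessesPerGroupA = (4 + 3 - 1) / 3 = 2,
// i.e. the copies for one stage are split into groups of two issues so that the
// global->shared traffic can be interleaved with the warp-level MMA iterations.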
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
EllMmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
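// Worked example of the decomposition above, using a hypothetical warp
// arrangement: with Base::WarpCount = <kM=2, kN=2, kK=2> and warp_idx = 5,
//   warp_idx_mn = 5 % (2 * 2) = 1,   warp_idx_k = 5 / (2 * 2) = 1,
//   warp_idx_m  = 1 % 2       = 1,   warp_idx_n  = 1 / 2      = 0,
// so this warp owns the (m=1, n=0) warp tile of the k=1 partition and its tile
// iterators are advanced by kWarpGemmIterations tiles along K accordingly.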
template<bool is_A_sparse, bool is_offset_constant>
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, EllIterator &ell_iter,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
bool is_valid = iterator_A.valid();
if (!is_A_sparse){
if (is_offset_constant){
auto ell_offset = ell_iter.get_offset_fast();
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes;
} else {
int k_offset = iterator_A.get_k();
auto ell_offset = ell_iter.get_offset(k_offset);
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes;
}
}
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, is_valid);
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
bool is_valid = iterator_B.valid();
if (is_A_sparse){
if (is_offset_constant){
auto ell_offset = ell_iter.get_offset_fast();
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes;
} else {
int k_offset = iterator_B.get_k();
auto ell_offset = ell_iter.get_offset(k_offset);
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes;
}
}
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, is_valid);
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
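// The ELL pointer adjustment above converts an offset counted in elements into
// units of one cp.async access. Worked example with hypothetical sizes: for a
// 16-bit element with kElementsPerAccess = 8 and kAccessesPerVector = 1,
//   kSrcBytes = 16 * 8 / 1 / 8 = 16 bytes per access,
// so an ell_offset of 64 elements advances gmem_ptr by
//   64 * sizeof(Element) / kSrcBytes = 64 * 2 / 16 = 8 access-sized steps.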
/// Perform a threadblock-scoped matrix multiply-accumulate
template<bool is_A_sparse, bool is_offset_constant>
CUTLASS_DEVICE
void operator()(
///< number of threadblock-scoped GEMM iterations along the K dimension
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum,
EllIterator &ell_iterator
) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
auto gmem_ptr = iterator_A.get();
bool is_valid = iterator_A.valid();
if (!is_A_sparse){
if (is_offset_constant){
auto ell_offset = ell_iterator.get_offset_fast();
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes;
} else {
int k_offset = iterator_A.get_k();
auto ell_offset = ell_iterator.get_offset(k_offset);
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes;
}
}
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, is_valid);
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
auto gmem_ptr = iterator_B.get();
bool is_valid = iterator_B.valid();
if (is_A_sparse){
if (is_offset_constant){
auto ell_offset = ell_iterator.get_offset_fast();
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes;
} else {
int k_offset = iterator_B.get_k();
auto ell_offset = ell_iterator.get_offset(k_offset);
is_valid = is_valid && (ell_offset >= 0);
gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes;
}
}
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, is_valid);
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
++ell_iterator;
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
if (is_A_sparse){
iterator_A.ell_add_mask(ell_iterator.get_blocksize());
}
else {
iterator_B.ell_add_mask(ell_iterator.get_blocksize());
}
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
// accumulator and this temporary accumulator is added to the final
// accumulator once in every mainloop iteration.
plus<FragmentC> plus_accum;
FragmentC tmp_accum;
if (platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
tmp_accum.clear();
}
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
if (platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
warp_mma(
tmp_accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
tmp_accum
);
if (warp_mma_k == 0) {
accum = plus_accum(accum, tmp_accum);
tmp_accum.clear();
}
} else {
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
}
// Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance<is_A_sparse, is_offset_constant>(
iterator_A, iterator_B, ell_iterator, group_start_iteration_A,
group_start_iteration_B);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance<is_A_sparse, is_offset_constant>(
iterator_A, iterator_B, ell_iterator, group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
++ell_iterator;
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
}
}
if (platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
accum = plus_accum(accum, tmp_accum);
}
// Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
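// Illustrative usage sketch. The names ThreadblockShape, GmemIteratorA/B,
// SmemIteratorA/B, WarpPolicy, kStages, kIsASparse and kIsOffsetConstant are
// placeholders supplied by the kernel that owns this mainloop, not definitions
// from this file.
//
//   using Mma = EllMmaMultistage<ThreadblockShape,
//                                GmemIteratorA, SmemIteratorA, arch::CacheOperation::Global,
//                                GmemIteratorB, SmemIteratorB, arch::CacheOperation::Global,
//                                ElementC, LayoutC, WarpPolicy, kStages>;
//
//   Mma mma(shared_storage, thread_idx, warp_idx, lane_idx);
//   mma.operator()<kIsASparse, kIsOffsetConstant>(
//       gemm_k_iterations, accumulators, iterator_A, iterator_B,
//       accumulators, ell_iterator);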
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/ell_mma_multistage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/ell_mma_multistage.h",
"repo_id": "include",
"token_count": 10488
} | 42 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
////////////////////////////////////////////////////////////////////////////////
// Shuffle registers for layout conversion
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for the operand in registers for the mma.sync
typename ElementMma_,
/// Element type for the operand in shared memory for ldmatrix
typename ElementLoad_,
/// Number of mma.sync operations performed along rows or columns
int NumMmaInstructions,
/// Number of elements in warp fragment
int NumElementsInWarpFragment,
/// Number of elements in mma fragment
int NumElementsInMmaFragment,
/// Identifies A or B multiplicand
Operand Operand_,
///
typename Enable = void >
struct FragmentShuffler {
public:
using ElementMma = ElementMma_;
using ElementLoad = ElementLoad_;
static int const kNumMmaInstructions = NumMmaInstructions;
static int const kNumElementsInWarpFragment = NumElementsInWarpFragment;
static int const kNumElementsInMmaFragment = NumElementsInMmaFragment;
static Operand const kOperand = Operand_;
using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>;
using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>;
CUTLASS_DEVICE
WarpFragment operator()(WarpFragment const &src) {
return src;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8)
/// for operand A multiplicand going through upcasting.
template <
/// Element type for the operand in registers for the mma.sync
typename ElementMma_,
/// Element type for the operand in shared memory for ldmatrix
typename ElementLoad_,
/// Number of mma.sync operations performed along rows or columns
int NumMmaInstructions,
/// Number of elements in warp fragment
int NumElementsInWarpFragment,
/// Number of elements in mma fragment
int NumElementsInMmaFragment
>
struct FragmentShuffler <ElementMma_, ElementLoad_,
NumMmaInstructions,
NumElementsInWarpFragment,
NumElementsInMmaFragment,
Operand::kA,
typename platform::enable_if<(sizeof_bits<ElementMma_>::value == 16) &&
(sizeof_bits<ElementLoad_>::value == 8)>::type> {
public:
using ElementMma = ElementMma_;
using ElementLoad = ElementLoad_;
static int const kNumMmaInstructions = NumMmaInstructions;
static int const kNumElementsInWarpFragment = NumElementsInWarpFragment;
static int const kNumElementsInMmaFragment = NumElementsInMmaFragment;
static Operand const kOperand = Operand::kA;
using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>;
using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>;
static uint32_t const kSelectBytesEvenThread = 0x5410;
static uint32_t const kSelectBytesOddThread = 0x7632;
private:
int delta_up_;
int delta_down_;
int odd_even_lane_id_;
uint32_t byte_selector_;
public:
CUTLASS_DEVICE
FragmentShuffler() {
int lane_id = cutlass::arch::LaneId();
delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1);
delta_down_ = 2 - delta_up_;
odd_even_lane_id_ = static_cast<int>(lane_id & 1);
byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread +
(1 - odd_even_lane_id_) * kSelectBytesEvenThread;
}
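// Worked example of the constructor arithmetic above (lane values chosen for
// illustration):
//   lane 0: delta_up_ = 0, delta_down_ = 2, byte_selector_ = 0x5410
//   lane 1: delta_up_ = 1, delta_down_ = 1, byte_selector_ = 0x7632
//   lane 2: delta_up_ = 1, delta_down_ = 1, byte_selector_ = 0x5410
//   lane 3: delta_up_ = 2, delta_down_ = 0, byte_selector_ = 0x7632
// __byte_perm(x, y, 0x5410) packs the two low bytes of x with the two low bytes
// of y, while 0x7632 packs the two high bytes of each; operator() below uses
// this to rearrange the 4x8b values into the order expected by mma.sync.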
CUTLASS_DEVICE
WarpFragment operator()(WarpFragment const &src) {
WarpFragment result;
MmaFragment const* mma_frag_src_ptr = reinterpret_cast<MmaFragment const*>(&src);
MmaFragment* mma_frag_dst_ptr = reinterpret_cast<MmaFragment*>(&result);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kNumMmaInstructions; n++) {
uint32_t const* src_ptr = reinterpret_cast<uint32_t const *>(&mma_frag_src_ptr[n]);
uint32_t *dst_ptr = reinterpret_cast<uint32_t *>(&mma_frag_dst_ptr[n]);
// Shuffle data within the warp, pull from other threads within the warp
uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_);
uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_);
uint32_t tmp2 = __shfl_up_sync(0xFFFFFFFF, src_ptr[1], delta_up_);
uint32_t tmp3 = __shfl_down_sync(0xFFFFFFFF, src_ptr[1], delta_down_);
// Reorder the data within the 32-bit word (4x8b) required for mma.sync
dst_ptr[0] = __byte_perm(tmp0, tmp2, byte_selector_);
dst_ptr[1] = __byte_perm(tmp1, tmp3, byte_selector_);
}
return result;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8)
/// for operand B multiplicand going through upcasting.
template <
/// Element type for the operand in registers for the mma.sync
typename ElementMma_,
/// Element type for the operand in shared memory for ldmatrix
typename ElementLoad_,
/// Number of mma.sync operations performed along rows or columns
int NumMmaInstructions,
/// Number of elements in warp fragment
int NumElementsInWarpFragment,
/// Number of elements in mma fragment
int NumElementsInMmaFragment
>
struct FragmentShuffler <ElementMma_, ElementLoad_,
NumMmaInstructions,
NumElementsInWarpFragment,
NumElementsInMmaFragment,
Operand::kB,
typename platform::enable_if<(sizeof_bits<ElementMma_>::value == 16) &&
(sizeof_bits<ElementLoad_>::value == 8)>::type> {
public:
using ElementMma = ElementMma_;
using ElementLoad = ElementLoad_;
static int const kNumMmaInstructions = NumMmaInstructions;
static int const kNumElementsInWarpFragment = NumElementsInWarpFragment;
static int const kNumElementsInMmaFragment = NumElementsInMmaFragment;
static Operand const kOperand = Operand::kB;
using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>;
using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>;
static uint32_t const kSelectBytesEvenThread = 0x5410;
static uint32_t const kSelectBytesOddThread = 0x7632;
private:
int delta_up_;
int delta_down_;
int odd_even_lane_id_;
uint32_t byte_selector_;
public:
CUTLASS_DEVICE
FragmentShuffler() {
int lane_id = cutlass::arch::LaneId();
delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1);
delta_down_ = 2 - delta_up_;
odd_even_lane_id_ = static_cast<int>(lane_id & 1);
byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread +
(1 - odd_even_lane_id_) * kSelectBytesEvenThread;
}
CUTLASS_DEVICE
WarpFragment operator()(WarpFragment const &src) {
WarpFragment result;
MmaFragment const* mma_frag_src_ptr = reinterpret_cast<MmaFragment const *>(&src);
MmaFragment* mma_frag_dst_ptr = reinterpret_cast<MmaFragment *>(&result);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kNumMmaInstructions; n++) {
uint32_t const* src_ptr = reinterpret_cast<uint32_t const*>(&mma_frag_src_ptr[n]);
uint32_t* dst_ptr = reinterpret_cast<uint32_t*>(&mma_frag_dst_ptr[n]);
// Shuffle data within the warp, pull from other threads within the warp
uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_);
uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_);
// Reorder the data within the 32-bit word (4x8b) required for mma.sync
dst_ptr[0] = __byte_perm(tmp0, tmp1, byte_selector_);
}
return result;
}
};
////////////////////////////////////////////////////////////////////////////////
// Data type conversion
////////////////////////////////////////////////////////////////////////////////
template <
/// Destination type
typename ElementDst_,
/// Source type
typename ElementSrc_,
/// Number of elements
int N,
///
typename Enable = void>
struct FragmentConverter {
using ElementDst = ElementDst_;
using ElementSrc = ElementSrc_;
// Operand fragment registers in destination and source types
using DestinationFragment = Array<ElementDst, N>;
using SourceFragment = Array<ElementSrc, N>;
FastNumericArrayConverter<ElementDst, ElementSrc, N> convert;
CUTLASS_DEVICE
DestinationFragment operator()(SourceFragment const &src) const {
return convert(src);
}
};
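// Illustrative instantiation (types chosen for the example): upcasting a
// fragment of four int8_t values, as loaded by ldmatrix, into the half_t
// operands consumed by mma.sync could be written as
//
//   FragmentConverter<cutlass::half_t, int8_t, 4> convert;
//   cutlass::Array<int8_t, 4> src;
//   cutlass::Array<cutlass::half_t, 4> dst = convert(src);
//
// which simply forwards to FastNumericArrayConverter<half_t, int8_t, 4>.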
////////////////////////////////////////////////////////////////////////////////
// Partial specialization for when Destination type is the *same* as
// Source type
template <
/// Data type
typename Element,
/// Number of elements
int N,
///
typename Enable>
struct FragmentConverter<Element, Element, N, Enable> {
using DestinationFragment = Array<Element, N>;
using SourceFragment = Array<Element, N>;
CUTLASS_DEVICE
DestinationFragment operator()(SourceFragment const &src) const {
return src;
}
};
} // namespace detail
/// Structure to compute the warp-level matrix product targeting Tensor Cores, with A and B operands of mixed data types.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaMixedInputTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Underlying arch::Mma instruction datatype for A operand
using ElementAMma = typename ArchMmaOperator::ElementA;
/// Underlying arch::Mma instruction datatype for B operand
using ElementBMma = typename ArchMmaOperator::ElementB;
/// Underlying arch::Mma instruction datatype for C operand
using MmaElementC = typename ArchMmaOperator::ElementC;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
///
// static int const kLoadShapeK = InstructionShape::kK *
// (sizeof_bits<ElementAMma>::value / sizeof_bits<ElementB>::value);
public:
/// Iterates over the A operand in Shared Memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for A tile in registers (loaded from Shared Memory)
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile in registers (for use in Mma instruction)
using TransformedFragmentA =
Array<ElementAMma, FragmentA::kElements>;
/// Underlying arch::Mma instruction operand fragment for matrix A
using MmaOperandA = typename ArchMmaOperator::FragmentA;
/// Iterates over the B operand in Shared Memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for B tile in registers (loaded from Shared Memory)
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile in registers (for use in Mma instruction)
using TransformedFragmentB =
Array<ElementBMma, FragmentB::kElements>;
/// Underlying arch::Mma instruction operand fragment for matrix B
using MmaOperandB = typename ArchMmaOperator::FragmentB;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Underlying arch::Mma instruction operand fragment for matrix C
using MmaOperandC = typename ArchMmaOperator::FragmentC;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
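// Worked example of the shapes above, assuming a hypothetical 64x64 warp tile
// and a 16x8x16 mma.sync instruction:
//   MmaIterations = MatrixShape<(64 + 16 - 1) / 16, (64 + 8 - 1) / 8>
//                 = MatrixShape<4, 8>,
// i.e. each call to operator() below issues 4 x 8 = 32 mma instructions.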
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaMixedInputTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn]);
} else {
mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
}
}
}
}
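// The serpentine index above walks the N dimension forward on even rows and
// backward on odd rows. For a hypothetical MmaIterations = <2, 4> the visit
// order is
//   m = 0: n_serpentine = 0, 1, 2, 3
//   m = 1: n_serpentine = 3, 2, 1, 0
// so consecutive mma instructions reuse the most recently loaded B fragment and
// accumulator registers.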
/// Transform the operand warp fragment registers to the data types and layout
/// required by the `cutlass::arch::Mma`
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
// Shuffle data within warp to obtain the mma.sync operand layout
detail::FragmentShuffler<ElementBMma, ElementB, MmaIterations::kColumn,
FragmentB::kElements, MmaOperandB::kElements, Operand::kB> shuffler_B;
FragmentB tmp_B;
tmp_B = shuffler_B(B);
// Convert the B operand to the Mma Instruction operand type
detail::FragmentConverter<ElementBMma, ElementB, FragmentB::kElements> convert_B;
dst_B = convert_B(tmp_B);
FragmentA tmp_A;
Array<ElementA, FragmentA::kElements / 2> *
ptr_tmp_A = reinterpret_cast<Array<ElementA,
FragmentA::kElements / 2> *>(&tmp_A);
Array<ElementAMma, FragmentA::kElements / 2> *
ptr_dst_A = reinterpret_cast<Array<ElementAMma,
FragmentA::kElements / 2> *>(&dst_A);
// Shuffle data within warp to obtain the mma.sync operand layout
detail::FragmentShuffler<ElementAMma, ElementA, MmaIterations::kRow,
FragmentA::kElements, MmaOperandA::kElements, Operand::kA> shuffler_A;
// Convert the A operand to the Mma Instruction operand type
detail::FragmentConverter<ElementAMma, ElementA, FragmentA::kElements / 2> convert_A;
tmp_A = shuffler_A(A);
ptr_dst_A[0] = convert_A(ptr_tmp_A[0]);
ptr_dst_A[1] = convert_A(ptr_tmp_A[1]);
}
};
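// Illustrative call sequence for this warp-level operator. The local names below
// (WarpShape, ElementA/B/C, LayoutA/B/C, Policy, frag_*, xf_*, accum) are
// placeholders; in practice the threadblock-scoped mainloop owns these objects
// and drives the same load -> transform -> mma pattern.
//
//   using WarpMma = MmaMixedInputTensorOp<WarpShape, ElementA, LayoutA,
//                                         ElementB, LayoutB, ElementC, LayoutC,
//                                         Policy>;
//   WarpMma warp_mma;
//   typename WarpMma::FragmentA frag_A;              // filled by WarpMma::IteratorA
//   typename WarpMma::FragmentB frag_B;              // filled by WarpMma::IteratorB
//   typename WarpMma::TransformedFragmentA xf_A;
//   typename WarpMma::TransformedFragmentB xf_B;
//   typename WarpMma::FragmentC accum;
//   accum.clear();
//   warp_mma.transform(xf_A, xf_B, frag_A, frag_B);  // warp shuffle + numeric upcast
//   warp_mma(accum, xf_A, xf_B, accum);              // issue the mma.sync instructions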
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h",
"repo_id": "include",
"token_count": 7131
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/wmma_array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity (A or B)
Operand Operand,
/// Data type of operand
typename Element_,
/// Layout of operand
typename Layout_,
/// Delta between *MMA operations (in units of *WMMA operations, concept:MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaMultiplicandTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread WMMA operation.
/// It uses nvcuda::wmma::load_matrix_sync to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions)
int OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaMultiplicandTileIterator<
Shape_, Operand::kA, Element_, Layout_,
OpDelta_, 32, Policy_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Delta between *WMMA operations
static int const kOpDelta = OpDelta_;
/// Wmma Operator information and operation delta
using Policy = Policy_;
//
// Derived quantities
//
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Native Wmma shape for operand A (concept MatrixShape)
using WmmaShape = MatrixShape<
Policy::Operator::Shape::kM,
Policy::Operator::Shape::kK
>;
/// Map cutlass datatype to nvcuda::wmma datatype
using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type;
/// Shape of individual WMMA load / stores for operand A
using Iterations = MatrixShape<
Shape::kRow / WmmaShape::kRow,
1
>;
/// Fragment object holding a warp's part
using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentA, Iterations::kCount>;
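// Worked example of the two shapes above, assuming a hypothetical 64-row warp
// tile and a 16x16x16 WMMA operator: WmmaShape = <16, 16>, so
//   Iterations = MatrixShape<64 / 16, 1> = MatrixShape<4, 1>
// and the Fragment holds four nvcuda::wmma fragments, one per 16x16 sub-tile
// along the M dimension.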
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// statically assert this specialization
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// This iterator is specialized for Operand A
static_assert(kOperand == Operand::kA,
"MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for A operands to warp-level Mma.");
/// Supported memory layouts
static_assert(
platform::is_same<cutlass::layout::RowMajor, Layout>::value ||
platform::is_same<cutlass::layout::ColumnMajor, Layout>::value,
"Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor");
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/////////////////////////////////////////////////////////////////////////////////////////////////////
private:
/// Shared memory base pointers - not advanced
char const *pointer_;
/// Byte offset into shared memory - advanced
Index byte_offset_;
/// Stride in units of number of elements
StrideIndex stride_;
/// Layout of shared memory
Layout layout_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += (offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
Index elements_offset = layout_({tile_offset.row() * Shape::kRow, tile_offset.column() * WmmaShape::kColumn});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator++() {
Index elements_offset = layout_({0, WmmaShape::kColumn});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator--() {
Index elements_offset = layout_({0, WmmaShape::kColumn});
byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_byte_offset(Fragment &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
Index load_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset);
nvcuda::wmma::load_matrix_sync(frag[m], ptr, stride_);
}
}
}
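// Worked example of the byte-offset arithmetic above, with hypothetical values:
// for a half-precision RowMajor tile with stride 64 and WmmaShape = <16, 16>,
// the (m = 1, k = 0) sub-tile begins at element offset
//   layout_({16, 0}) = 16 * 64 = 1024,
// giving load_byte_offset = 1024 * 16 / 8 = 2048 bytes past the base pointer.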
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
Index store_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset);
nvcuda::wmma::store_matrix_sync(ptr, frag[m], stride_);
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread WMMA operation.
/// It uses nvcuda::wmma::load_matrix_sync to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions)
int OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaMultiplicandTileIterator<
Shape_, Operand::kB, Element_, Layout_,
OpDelta_, 32, Policy_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Delta between *WMMA operations
static int const kOpDelta = OpDelta_;
/// Wmma Operator information and operation delta
using Policy = Policy_;
//
// Derived quantities
//
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Native Wmma shape (concept MatrixShape)
using WmmaShape = MatrixShape<
Policy::Operator::Shape::kK,
Policy::Operator::Shape::kN
>;
/// Map cutlass datatype to nvcuda::wmma datatype
using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type;
/// Shape of individual WMMA load / stores for operand B
using Iterations = MatrixShape<
1,
Shape::kColumn / WmmaShape::kColumn
>;
/// Fragment object holding a warp's part
using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentB, Iterations::kCount>;
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// statically asserts this specialization
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// This iterator is specialized for Operand B
static_assert(kOperand == Operand::kB,
"MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for B operands to warp-level Mma.");
/// Supported memory layouts
static_assert(
platform::is_same<cutlass::layout::RowMajor, Layout>::value ||
platform::is_same<cutlass::layout::ColumnMajor, Layout>::value,
"Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor");
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/////////////////////////////////////////////////////////////////////////////////////////////////////
private:
/// Shared memory base pointers - not advanced
char const *pointer_;
/// Byte offset into shared memory - advanced
Index byte_offset_;
/// Stride in units of number of elements
StrideIndex stride_;
/// Layout of shared memory
Layout layout_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += (offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
Index elements_offset = layout_({tile_offset.row() * WmmaShape::kRow, tile_offset.column() * Shape::kColumn});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator++() {
Index elements_offset = layout_({WmmaShape::kRow, 0});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator--() {
Index elements_offset = layout_({WmmaShape::kRow, 0});
byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_byte_offset(Fragment &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
Index load_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset);
nvcuda::wmma::load_matrix_sync(frag[n], ptr, stride_);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
Index store_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset);
nvcuda::wmma::store_matrix_sync(ptr, frag[n], stride_);
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions, concept: MatrixShape)
typename OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaAccumulatorTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread WMMA operation.
/// It uses nvcuda::wmma::load_matrix_sync and nvcuda::wmma::store_matrix_sync
/// to access the accumulator tile and therefore must be initialized with a TensorRef to memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions)
typename OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaAccumulatorTileIterator
{
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Wmma Operator information and operation delta
using Policy = Policy_;
//
// Derived quantities
//
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Native Wmma shape (concept MatrixShape)
using WmmaShape = MatrixShape<
Policy::Operator::Shape::kM,
Policy::Operator::Shape::kN
>;
  /// Map cutlass datatype to nvcuda::wmma datatype
using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type;
  /// Map cutlass::layout to nvcuda::wmma::layout_t enum
static nvcuda::wmma::layout_t const WmmaLayout = cutlass::arch::CutlassToWmmaLayout<Layout>::value;
/// Shape of individual WMMA load / stores for accumulator
using Iterations = MatrixShape<
Shape::kRow / WmmaShape::kRow,
Shape::kColumn / WmmaShape::kColumn
>;
/// Fragment object holding a thread's part of a tile
using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentC, Iterations::kCount>;
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// statically asserts this specialization
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// Supported layouts
static_assert(
platform::is_same<cutlass::layout::RowMajor, Layout>::value ||
platform::is_same<cutlass::layout::ColumnMajor, Layout>::value,
"Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor");
private:
/// Internal reference
cutlass::TensorRef<Element, Layout> ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
): ref_(ref) { }
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset({tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator++() {
ref_.add_coord_offset({Shape::kRow, 0});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
  /// Advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
  /// Recedes in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
const WmmaDataType * ptr = reinterpret_cast<const WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset);
nvcuda::wmma::load_matrix_sync(frag[m * Iterations::kColumn + n], ptr, ref_.stride()[0], WmmaLayout);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
WmmaDataType * ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset);
nvcuda::wmma::store_matrix_sync(ptr, frag[m * Iterations::kColumn + n], ref_.stride()[0], WmmaLayout);
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
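////////////////////////////////////////////////////////////////////////////////
///
/// Usage sketch (illustrative only; the Shape/Element/Layout/OpDelta/Policy parameters are assumed
/// to come from the enclosing warp-level MMA configuration and are not spelled out here):
///
///   MmaTensorOpWmmaAccumulatorTileIterator<Shape, Element, Layout, OpDelta, Policy> iter(ref, lane_id);
///
///   decltype(iter)::Fragment accum;
///
///   iter.load(accum);     // one nvcuda::wmma::load_matrix_sync per WMMA tile in Iterations
///   // ... accumulate into accum with the warp-level MMA ...
///   iter.store(accum);    // one nvcuda::wmma::store_matrix_sync per WMMA tile in Iterations
///
////////////////////////////////////////////////////////////////////////////////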
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h",
"repo_id": "include",
"token_count": 8607
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/pitch_linear_coord.h"
namespace cutlass {
namespace layout {
template <int Contiguous, int Strided>
using PitchLinearShape = cutlass::PitchLinearShape<Contiguous, Strided>;
using PitchLinearCoord = cutlass::PitchLinearCoord;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for pitch-linear memory
class PitchLinear {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
PitchLinear(LongIndex ldm = 0): stride_(ldm) { }
/// Constructor
CUTLASS_HOST_DEVICE
PitchLinear(Stride _stride): stride_(_stride) { }
  /// Helper that returns a layout for a tightly packed tensor
CUTLASS_HOST_DEVICE
static PitchLinear packed(TensorCoord const &extent) {
return PitchLinear(extent.contiguous());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return LongIndex(coord.contiguous()) + LongIndex(coord.strided()) * LongIndex(stride_[0]);
}
/// Returns the logical coordinate given an offset.
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex index) const {
return make_Coord(
TensorCoord::Index(index % stride_[0]),
TensorCoord::Index(index / stride_[0])
);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
LongIndex stride(int rank) const {
return stride_[rank];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
LongIndex & stride(int rank) {
return stride_[rank];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.strided() * stride_[0];
}
};
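/// Example (illustrative): for a pitch-linear layout constructed with stride ldm = 8, the
/// coordinate (contiguous = 3, strided = 2) maps to linear offset 3 + 2 * 8 = 19, and
/// inverse() recovers the coordinate:
///
///   cutlass::layout::PitchLinear layout(8);
///
///   auto offset = layout({3, 2});       // 19
///   auto coord  = layout.inverse(19);   // (contiguous = 3, strided = 2)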
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| include/cutlass/layout/pitch_linear.h/0 | {
"file_path": "include/cutlass/layout/pitch_linear.h",
"repo_id": "include",
"token_count": 1370
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines container classes and iterators for managing a statically sized vector
of boolean predicates.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/cstdint>
#else
#include <assert.h>
#include <stdint.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/*!@defgroup predicate_vector_concept Predicate Vector Concept
@{
Implementations of \ref predicate_vector_concept contain an ordered set of boolean predicates which
may be used as conditionals in other device-side operations. Both random access and iterators
offering sequential access are provided.
@par Predicate Vector
A \ref predicate_vector_concept satisfies the following expressions
- <b>at(int idx)</b> - returns the value of the indexed predicate
- <b>set(int idx, bool value)</b> - sets the value of the indexed predicate
- <b>begin()</b> - returns a \ref predicate_iterator_concept pointing to the first predicate
@}
*/
////////////////////////////////////////////////////////////////////////////////////////////////////
/*!@defgroup predicate_iterator_concept Predicate Iterator Concept
@{
Implementations of \ref predicate_iterator_concept enable accessing and traversing elements of a
bit vector.
@par Const Predicate Iterator
A const \ref predicate_iterator_concept satisfies the following expressions
- <b>++it</b> increments the iterator to the next predicate
- <b>*it</b> returns the value of the currently pointed-to predicate
@par Mutable Predicate Iterator
A \ref predicate_iterator_concept that is non-const <b>also</b> satisfies the following expressions
- <b>it.set(bool value)</b> sets the value of the currently pointed-to predicate
@}
*/
////////////////////////////////////////////////////////////////////////////////////////////////////
/*!@defgroup predicate_tile_adapter Predicate Tile Adapter Concept
@{
Implementations of \ref predicate_tile_adapter provide a mapping between the elements of a \ref
tile_traits_concept and a \ref predicate_vector_concept.
@par Predicate Tile Adapter
A \ref predicate_tile_adapter satisfies the following expressions
- <b>at(int d, int h, int w, int c)</b> - returns the value of a predicate corresponding to the
access (d, h, w, c) within the tile.
@}
*/
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array of bits implementing @concept{predicate_vector_concept}.
template <
/// Number of predicates contained in predicate vector
int kPredicates_,
/// Number of predicates contained in each byte of internal storage
int kPredicatesPerByte_ = 4,
/// Location of first predicate within byte of internal storage
int kPredicateStart_ = 0>
struct PredicateVector {
/// Number of bits stored by the PredicateVector
static constexpr int kPredicates = kPredicates_;
/// Number of bits stored within each byte of the predicate bit vector
static constexpr int kPredicatesPerByte = kPredicatesPerByte_;
/// First bit within each byte containing predicates
static constexpr int kPredicateStart = kPredicateStart_;
// Make sure no one tries to put more than 8 bits in a byte :)
static_assert(kPredicatesPerByte <= 8, "kPredicatesPerByte must fit within an actual byte");
// Make sure the "offsetted" bits fit in one byte.
static_assert(kPredicateStart + kPredicatesPerByte <= 8,
"The offsetted predicates must fit within an actual byte.");
/// Storage type of individual elements
typedef uint32_t Storage;
/// Number of bytes needed
static constexpr int kBytes = (kPredicates + kPredicatesPerByte - 1) / kPredicatesPerByte;
/// Number of storage elements needed
static constexpr int kWordCount = (kBytes + int(sizeof(Storage)) - 1) / int(sizeof(Storage));
/// The byte mask corresponding to predicates
static constexpr Storage kByteMask = (((1 << kPredicatesPerByte) - 1) << kPredicateStart);
private:
//
// Data members
//
/// Words of bit vector
Storage storageData[kWordCount];
//
// Methods
//
/// Computes the word and bit corresponding to a logical predicate index
CUTLASS_HOST_DEVICE void computeStorageOffset(int &word, int &bit, int idx) const {
CUTLASS_ASSERT(idx < kPredicates);
int byte = (idx / kPredicatesPerByte);
int bit_offset = (idx % kPredicatesPerByte);
word = byte / sizeof(Storage);
int byte_offset = (byte % sizeof(Storage));
bit = byte_offset * 8 + bit_offset + kPredicateStart;
}
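  /// Worked example (illustrative): with the defaults kPredicatesPerByte = 4 and
  /// kPredicateStart = 0, and Storage = uint32_t, predicate index idx = 9 maps to
  ///
  ///   byte        = 9 / 4 = 2
  ///   bit_offset  = 9 % 4 = 1
  ///   word        = 2 / 4 = 0
  ///   byte_offset = 2 % 4 = 2
  ///   bit         = 2 * 8 + 1 + 0 = 17
  ///
  /// that is, predicate 9 is stored in bit 17 of word 0.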
/// Returns word mask.
  CUTLASS_HOST_DEVICE static constexpr Storage computeWordMask() {
Storage mask(0);
CUTLASS_PRAGMA_UNROLL
for (size_t byte = 0; byte < sizeof(Storage); ++byte) {
mask |= (kByteMask << (byte * 8));
}
return mask;
}
/// Returns mask of last word.
  CUTLASS_HOST_DEVICE static constexpr Storage computeLastWordMask() {
Storage mask(0);
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < kBytes % sizeof(Storage); ++byte) {
mask |= (kByteMask << (byte * 8));
}
return mask;
}
/// Accesses a given word with optional assertions
CUTLASS_HOST_DEVICE Storage &storage(int word) {
CUTLASS_ASSERT(word < kWordCount);
return storageData[word];
}
/// Accesses a given word with optional assertions
CUTLASS_HOST_DEVICE Storage const &storage(int word) const {
CUTLASS_ASSERT(word < kWordCount);
return storageData[word];
}
public:
//
// Iterator
//
/**
* @brief An iterator implementing \ref predicate_iterator_concept enabling sequential
* read and write access to predicates.
* @concept{predicate_iterator_concept}
*/
class Iterator {
/// Reference to PredicateVector instance
PredicateVector &vec_;
/// Index into PredicateVector
int bit_;
public:
/// Copy constructor
CUTLASS_HOST_DEVICE
Iterator(Iterator const &it) : vec_(it.vec_), bit_(it.bit_) {}
/// Constructs an iterator from a PredicateVector
CUTLASS_HOST_DEVICE
Iterator(PredicateVector &vec, int _start = 0) : vec_(vec), bit_(_start) {}
/// Pre-increment
CUTLASS_HOST_DEVICE
Iterator &operator++() {
++bit_;
return *this;
}
/// Increment
CUTLASS_HOST_DEVICE
Iterator &operator+=(int offset) {
bit_ += offset;
return *this;
}
/// Pre-decrement
CUTLASS_HOST_DEVICE
Iterator &operator--() {
--bit_;
return *this;
}
/// Decrement
CUTLASS_HOST_DEVICE
Iterator &operator-=(int offset) {
bit_ -= offset;
return *this;
}
/// Post-increment
CUTLASS_HOST_DEVICE
Iterator operator++(int) {
Iterator ret(*this);
ret.bit_++;
return ret;
}
/// Post-decrement
CUTLASS_HOST_DEVICE
Iterator operator--(int) {
Iterator ret(*this);
ret.bit_--;
return ret;
}
/// Iterator advances by some amount
CUTLASS_HOST_DEVICE
Iterator operator+(int offset) {
Iterator ret(*this);
ret.bit_ += offset;
return ret;
}
/// Iterator recedes by some amount
CUTLASS_HOST_DEVICE
Iterator operator-(int offset) {
      Iterator ret(*this);
ret.bit_ -= offset;
return ret;
}
/// Returns true if iterators point to the same bit
CUTLASS_HOST_DEVICE
bool operator==(Iterator const &it) const { return bit_ == it.bit_; }
/// Returns false if iterators point to the same bit
CUTLASS_HOST_DEVICE
bool operator!=(Iterator const &it) const { return bit_ != it.bit_; }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool get() { return vec_.at(bit_); }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool at() const { return vec_.at(bit_); }
/// Dereferences iterator
CUTLASS_HOST_DEVICE
bool operator*() const { return at(); }
/// Sets the bit at the pointed to location
CUTLASS_HOST_DEVICE
void set(bool value = true) { vec_.set(bit_, value); }
};
/**
* @brief An iterator implementing \ref predicate_iterator_concept enabling sequential
* read and write access to predicates.
* @concept{predicate_iterator_concept}
*/
class ConstIterator {
/// Reference to PredicateVector instance
PredicateVector const &vec_;
/// Index into PredicateVector
int bit_;
public:
/// Copy constructor
CUTLASS_HOST_DEVICE
ConstIterator(ConstIterator const &it) : vec_(it.vec_), bit_(it.bit_) {}
/// Constructs an iterator from a PredicateVector
CUTLASS_HOST_DEVICE
ConstIterator(PredicateVector const &vec, int _start = 0) : vec_(vec), bit_(_start) {}
/// Pre-increment
CUTLASS_HOST_DEVICE
ConstIterator &operator++() {
++bit_;
return *this;
}
/// Increment
CUTLASS_HOST_DEVICE
ConstIterator &operator+=(int offset) {
bit_ += offset;
return *this;
}
/// Pre-decrement
CUTLASS_HOST_DEVICE
ConstIterator &operator--() {
--bit_;
return *this;
}
/// Decrement
CUTLASS_HOST_DEVICE
ConstIterator &operator-=(int offset) {
bit_ -= offset;
return *this;
}
/// Post-increment
CUTLASS_HOST_DEVICE
ConstIterator operator++(int) {
ConstIterator ret(*this);
ret.bit_++;
return ret;
}
/// Post-decrement
CUTLASS_HOST_DEVICE
ConstIterator operator--(int) {
ConstIterator ret(*this);
ret.bit_--;
return ret;
}
/// Iterator advances by some amount
CUTLASS_HOST_DEVICE
ConstIterator operator+(int offset) {
ConstIterator ret(*this);
ret.bit_ += offset;
return ret;
}
/// Iterator recedes by some amount
CUTLASS_HOST_DEVICE
ConstIterator operator-(int offset) {
ConstIterator ret(*this);
ret.bit_ -= offset;
return ret;
}
/// Returns true if iterators point to the same bit
CUTLASS_HOST_DEVICE
bool operator==(ConstIterator const &it) const { return bit_ == it.bit_; }
/// Returns false if iterators point to the same bit
CUTLASS_HOST_DEVICE
bool operator!=(ConstIterator const &it) const { return bit_ != it.bit_; }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool get() { return vec_.at(bit_); }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool at() const { return vec_.at(bit_); }
/// Dereferences iterator
CUTLASS_HOST_DEVICE
bool operator*() const { return at(); }
};
/// Iterator that always returns true
struct TrivialIterator {
/// Constructor
CUTLASS_HOST_DEVICE
TrivialIterator() {}
/// Copy constructor
CUTLASS_HOST_DEVICE
TrivialIterator(Iterator const &it) {}
/// Constructs an iterator from a PredicateVector
CUTLASS_HOST_DEVICE
TrivialIterator(PredicateVector const &_vec) {}
/// Pre-increment
CUTLASS_HOST_DEVICE
TrivialIterator &operator++() { return *this; }
/// Post-increment
CUTLASS_HOST_DEVICE
TrivialIterator operator++(int) { return *this; }
/// Dereferences iterator
CUTLASS_HOST_DEVICE
bool operator*() const { return true; }
};
public:
//
// Methods
//
/// Initialize the predicate vector
CUTLASS_HOST_DEVICE PredicateVector(bool value = true) { fill(value); }
/// Fills all predicates with a given value
CUTLASS_HOST_DEVICE void fill(bool value = true) {
Storage item = (value ? ~Storage(0) : Storage(0));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = item;
}
}
/// Clears all predicates
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = 0;
}
}
/// Sets all predicates to true
CUTLASS_HOST_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = ~Storage(0);
}
}
/// Accesses a bit within the predicate vector.
CUTLASS_HOST_DEVICE bool operator[](int idx) const { return at(idx); }
/// Accesses a bit within the predicate vector.
CUTLASS_HOST_DEVICE bool at(int idx) const {
int bit, word;
computeStorageOffset(word, bit, idx);
return ((storage(word) >> bit) & 1);
}
/// Set a bit within the predicate vector.
CUTLASS_HOST_DEVICE void set(int idx, bool value = true) {
int bit, word;
computeStorageOffset(word, bit, idx);
Storage disable_mask = (~(Storage(1) << bit));
Storage enable_mask = (Storage(value) << bit);
storage(word) = ((storage(word) & disable_mask) | enable_mask);
}
/// Computes the intersection of two identical predicate vectors.
CUTLASS_HOST_DEVICE PredicateVector &operator&=(PredicateVector const &predicates) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = (storage(i) & predicates.storage(i));
}
return *this;
}
/// Computes the union of two identical predicate vectors.
CUTLASS_HOST_DEVICE PredicateVector &operator|=(PredicateVector const &predicates) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = (storage(i) | predicates.storage(i));
}
return *this;
}
/// Returns true if entire predicate array is zero.
CUTLASS_HOST_DEVICE bool is_zero() const {
constexpr Storage mask = computeWordMask();
Storage result = 0;
CUTLASS_PRAGMA_UNROLL
for (int word = 0; word < kWordCount - 1; ++word) {
result |= (storage(word) & mask);
}
constexpr Storage last_word_mask = computeLastWordMask();
result |= (storage(kWordCount - 1) & last_word_mask);
return result == 0;
}
/// Returns an iterator to the start of the bit vector
CUTLASS_DEVICE
Iterator begin() { return Iterator(*this); }
/// Returns an iterator
CUTLASS_DEVICE
Iterator end() { return Iterator(*this, kPredicates); }
/// Returns a ConstIterator
CUTLASS_DEVICE
ConstIterator const_begin() const { return ConstIterator(*this); }
/// Returns a ConstIterator
CUTLASS_DEVICE
ConstIterator const_end() const { return ConstIterator(*this, kPredicates); }
};
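////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Usage sketch (illustrative only; begin()/end() iterators are device-side):
///
///   cutlass::PredicateVector<8> preds;   // all 8 predicates initialized to true
///
///   preds.set(3, false);                 // clear predicate 3
///   bool p3 = preds[3];                  // false
///
///   for (auto it = preds.begin(); it != preds.end(); ++it) {
///     if (*it) { /* guarded access */ }
///   }
///
////////////////////////////////////////////////////////////////////////////////////////////////////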
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/predicate_vector.h/0 | {
"file_path": "include/cutlass/predicate_vector.h",
"repo_id": "include",
"token_count": 5505
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Provides a mechanism for packing and unpacking elements smaller than one byte
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/integer_subbyte.h"
#include "cutlass/fast_math.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This class provides a mechanism for packing and unpacking elements smaller than one byte. It
/// assumes these sub-byte elements are packed in a traditional C++ numeric type.
///
/// The intended application is to provide a mechanism to indirectly reference elements in
/// memory or Array<> objects whose addresses cannot otherwise be taken since they are smaller
/// than one byte.
///
/// Supports basic pointer arithmetic:
///
/// Example:
///
/// int4b_t *ptr = ...;
///
/// SubbyteReference<int4b_t> ref = ptr;
/// ref += 15;
///
/// int4b_t x = ref; // load an int4b_t
/// ref = x + 2_s4; // perform arithmetic on int4b_t and then store
///
template <
typename Element_, /// CUTLASS numeric element type.
typename Storage_ = uint8_t, /// Underlying storage type. Must be able to hold an integer
/// number of objects of type Element.
class = void
>
class ConstSubbyteReference {
public:
using Element = Element_;
using Storage = Storage_;
using StoragePointer = Storage const *;
static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value,
"Size of Element must not be greater than Storage.");
static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value),
"Storage must be divisible by Element");
private:
///! Number of elements per storage vector
int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value;
///! Bit mask
Storage const kMask =
((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ?
(Storage(1) << sizeof_bits<Element>::value) - Storage(1) :
~Storage(0));
private:
/// Pointer to array containing element
StoragePointer ptr_;
/// Offset (in units of elements) from pointer.
///
/// Invariant: must always be in range [0, kElementsPerVector)
int offset_;
public:
CUTLASS_HOST_DEVICE
ConstSubbyteReference(): ptr_(nullptr), offset_(0) { }
/// Constructor
CUTLASS_HOST_DEVICE
ConstSubbyteReference(
Element const *ptr, /// pointer to memory
int64_t offset /// logical offset in units of Element
):
ptr_(reinterpret_cast<StoragePointer>(ptr)),
offset_(0) {
int64_t offset_in_vectors = offset / kElementsPerVector;
int64_t offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = int(offset_in_elements);
}
/// Constructor
CUTLASS_HOST_DEVICE
ConstSubbyteReference(
Element *ptr = nullptr
): ConstSubbyteReference(ptr, 0) { }
/// Gets storage pointer
CUTLASS_HOST_DEVICE
StoragePointer storage_pointer() const {
return ptr_;
}
/// Gets element offset within storage vector
CUTLASS_HOST_DEVICE
int element_offset() const {
return offset_;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
Element get() const {
Storage item = Storage((*ptr_ >> (offset_ * sizeof_bits<Element>::value)) & kMask);
return reinterpret_cast<Element const &>(item);
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
operator Element() const {
return get();
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator+=(int offset) {
offset += offset_;
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator+=(long long offset) {
offset += offset_;
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator-=(int offset) {
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator-=(long long offset) {
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator+(int offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator+(long long offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator-(int offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
  ConstSubbyteReference operator-(long long offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Computes the difference in elements between references
CUTLASS_HOST_DEVICE
ptrdiff_t operator-(ConstSubbyteReference ref) const {
return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to signed 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator int64_t() const {
return int64_t(get());
}
/// Explicit cast to unsigned 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return uint64_t(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
/// Explicit cast to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(get());
}
};
template <
typename Element_, /// CUTLASS numeric element type.
typename Storage_ = /// Underlying storage type. Must be able to hold an integer
/// number of objects of type Element.
#if defined(__CUDA_ARCH__)       /// Default size depends on width of atomicCAS() overloads.
#if (__CUDA_ARCH__ >= 700) ///
uint16_t
#else
uint32_t
#endif
#else
uint8_t
#endif
,
class = void
>
class SubbyteReference {
public:
using Element = Element_;
using Storage = Storage_;
using StoragePointer = Storage *;
static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value,
"Size of Element must not be greater than Storage.");
static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value),
"Storage must be divisible by Element");
private:
///! Number of elements per storage vector
int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value;
///! Bit mask
Storage const kMask =
((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ?
(Storage(1) << sizeof_bits<Element>::value) - Storage(1) :
~Storage(0));
private:
/// Pointer to array containing element
StoragePointer ptr_;
/// Offset (in units of elements) from pointer.
///
/// Invariant: must always be in range [0, kElementsPerVector)
int offset_;
public:
CUTLASS_HOST_DEVICE
SubbyteReference(): ptr_(nullptr), offset_(0) { }
/// Constructor
CUTLASS_HOST_DEVICE
SubbyteReference(
Element *ptr, /// pointer to memory
int64_t offset /// logical offset in units of Element
):
ptr_(reinterpret_cast<StoragePointer>(ptr)),
offset_(0) {
int64_t offset_in_vectors = offset / kElementsPerVector;
int64_t offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = int(offset_in_elements);
}
/// Constructor
CUTLASS_HOST_DEVICE
SubbyteReference(
Element *ptr = nullptr
): SubbyteReference(ptr, 0) { }
/// Gets storage pointer
CUTLASS_HOST_DEVICE
StoragePointer storage_pointer() const {
return ptr_;
}
/// Gets storage pointer
CUTLASS_HOST_DEVICE
Element * operator&() const {
return reinterpret_cast<Element *>(ptr_);
}
/// Gets element offset within storage vector
CUTLASS_HOST_DEVICE
int element_offset() const {
return offset_;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
Element get() const {
uint8_t const* byte_ptr = reinterpret_cast<uint8_t const*>(ptr_);
// Convert offset in elements to offset in bytes
constexpr int elements_per_byte = cutlass::sizeof_bits<uint8_t>::value / cutlass::sizeof_bits<Element>::value;
byte_ptr += offset_ / elements_per_byte;
// Offset of element within a byte
int byte_offset = offset_ % elements_per_byte;
uint8_t item = uint8_t((*byte_ptr >> (byte_offset * cutlass::sizeof_bits<Element>::value)) & kMask);
return reinterpret_cast<Element const &>(item);
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference & set(Element const &x) {
Storage item = (reinterpret_cast<Storage const &>(x) & kMask);
Storage kUpdateMask = Storage(~(kMask << (offset_ * cutlass::sizeof_bits<Element>::value)));
Storage new_bits = Storage(item << (offset_ * cutlass::sizeof_bits<Element>::value));
#if defined(__CUDA_ARCH__)
//
// Homebrew read-modify-write
//
Storage original;
Storage updated;
do {
original = (*ptr_);
updated = Storage((original & kUpdateMask) | new_bits);
original = atomicCAS(ptr_, original, updated);
} while (updated != original);
#else
Storage original = (*ptr_);
Storage updated = Storage((original & kUpdateMask) | new_bits);
*ptr_ = updated;
#endif
return *this;
}
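  /// Worked example (illustrative): with Element = int4b_t and Storage = uint8_t (the host-side
  /// default), an element at offset_ = 1 occupies the high nibble of its storage byte:
  ///
  ///   kMask       = 0x0F
  ///   kUpdateMask = ~(0x0F << 4) = 0x0F    // preserves the low nibble
  ///   new_bits    = (x & 0x0F) << 4        // places the new value in the high nibble
  ///
  /// so the read-modify-write above replaces only the four bits belonging to this element.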
////
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
operator Element() const {
return get();
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(Element const & x) {
return set(x);
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(SubbyteReference const & x) {
return set(x.get());
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(
ConstSubbyteReference<Element, Storage> const &x) {
return set(x.get());
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator+=(int offset) {
offset += offset_;
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator+=(long long offset) {
offset += offset_;
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator-=(int offset) {
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator-=(long long offset) {
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator+(int offset) const {
SubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator+(long long offset) const {
SubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator-(int offset) const {
SubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
  SubbyteReference operator-(long long offset) const {
SubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Computes the difference in elements between references
CUTLASS_HOST_DEVICE
ptrdiff_t operator-(SubbyteReference ref) const {
return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to signed 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator int64_t() const {
return int64_t(get());
}
/// Explicit cast to unsigned 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return uint64_t(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
/// Explicit cast to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(get());
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename T> using _war = T;
template <
typename Element_, /// CUTLASS numeric element type.
typename Storage_ /// Underlying basic storage type.
>
class SubbyteReference<Element_, Storage_,
typename platform::enable_if<sizeof_bits<Storage_>::value % sizeof_bits<Element_>::value != 0>::type> {
public:
using Element = Element_;
  ///! Note: the storage unit size may not be divisible by the Element size.
  /// An element may then be stored across 2 storage units, so a storage vector is needed to hold
  /// an integer number of objects of type Element.
using StorageUnit = Storage_;
static int const kBitsStoredVec = cutlass::lcm_cxx11(sizeof_bits<Element>::value, sizeof_bits<StorageUnit>::value);
static int const kNumStorageUnitPerStoredVec = kBitsStoredVec / sizeof_bits<StorageUnit>::value;
using StorageVec = StorageUnit[kNumStorageUnitPerStoredVec];
using StorageVecPointer = StorageVec *;
using CudaAtomicType = typename platform::conditional<
sizeof_bits<StorageUnit>::value == 16,
uint32_t,
uint64_t
>::type;
static_assert(sizeof_bits<Element>::value <= sizeof_bits<StorageVec>::value,
"Size of Element must not be greater than StorageVec.");
static_assert(!(sizeof_bits<StorageVec>::value % sizeof_bits<Element>::value),
"StorageVec must be divisible by Element");
private:
///! Number of elements per storage vector
int const kElementsPerVector = sizeof_bits<StorageVec>::value / sizeof_bits<Element>::value;
///! Bit mask for storage unit.
StorageUnit const kMask = (StorageUnit(1) << sizeof_bits<Element>::value) - StorageUnit(1);
/// Pointer to array containing element
_war<StorageVecPointer> ptr_;
/// Offset (in units of elements) from pointer.
///
/// Invariant: must always be in range [0, kElementsPerVector)
int offset_;
/// Element may be stored across 2 storage unit.
/// Low storage unit index in StorageVec
/// High storage unit index in StorageVec
int low_storage_unit_idx_;
int high_storage_unit_idx_;
/// Full Mask to extract the entire element
uint64_t full_element_mask_;
/// Mask to extract the Element from Low storage unit and High storage unit.
StorageUnit low_storage_mask_;
StorageUnit high_storage_mask_;
/// Start bit index inside the storage unit.
int start_bit_idx_;
private:
CUTLASS_HOST_DEVICE
void update_element_status() {
int num_bits = offset_ * sizeof_bits<Element>::value;
start_bit_idx_ = num_bits % sizeof_bits<StorageUnit>::value;
low_storage_unit_idx_ = num_bits / sizeof_bits<StorageUnit>::value;
high_storage_unit_idx_ = sizeof_bits<StorageUnit>::value - (start_bit_idx_) < sizeof_bits<Element>::value
? low_storage_unit_idx_ + 1 : low_storage_unit_idx_;
full_element_mask_ = uint64_t(kMask) << start_bit_idx_;
low_storage_mask_ = StorageUnit(full_element_mask_ & ~StorageUnit(0));
high_storage_mask_ = StorageUnit((full_element_mask_ >> sizeof_bits<StorageUnit>::value) & ~StorageUnit(0));
}
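  /// Worked example (illustrative; a 6-bit element type is hypothetical): with
  /// sizeof_bits<Element>::value = 6 and StorageUnit = uint8_t, a stored vector spans
  /// lcm(6, 8) = 24 bits = 3 storage units and holds kElementsPerVector = 4 elements.
  /// For offset_ = 1:
  ///
  ///   start_bit_idx_         = 6 % 8 = 6
  ///   low_storage_unit_idx_  = 6 / 8 = 0
  ///   high_storage_unit_idx_ = 1            // 8 - 6 < 6, so the element straddles units 0 and 1
  ///   full_element_mask_     = 0x3F << 6 = 0xFC0
  ///   low_storage_mask_      = 0xC0
  ///   high_storage_mask_     = 0x0F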
public:
CUTLASS_HOST_DEVICE
SubbyteReference(): ptr_(nullptr), offset_(0) { }
/// Constructor
CUTLASS_HOST_DEVICE
SubbyteReference(
Element *ptr, /// pointer to memory
int64_t offset /// logical offset in units of Element
):
ptr_(reinterpret_cast<StorageVecPointer>(ptr)),
offset_(0) {
int64_t offset_in_vectors = offset / kElementsPerVector;
int64_t offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = int(offset_in_elements);
update_element_status();
}
/// Constructor
CUTLASS_HOST_DEVICE
SubbyteReference(
Element *ptr = nullptr
): SubbyteReference(ptr, 0) { }
/// Gets StorageVec pointer
CUTLASS_HOST_DEVICE
StorageVecPointer storage_pointer() const {
return ptr_;
}
/// Gets StorageVec pointer
CUTLASS_HOST_DEVICE
Element * operator&() const {
return reinterpret_cast<Element *>(ptr_);
}
/// Gets element offset within StorageVec vector
CUTLASS_HOST_DEVICE
int element_offset() const {
return offset_;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
Element get() const {
StorageUnit low_bits = (*ptr_)[low_storage_unit_idx_] & low_storage_mask_;
StorageUnit high_bits = low_storage_unit_idx_ != high_storage_unit_idx_ ? (*ptr_)[high_storage_unit_idx_] & high_storage_mask_ : 0;
uint64_t full_item = ((uint64_t)high_bits << sizeof_bits<StorageUnit>::value) | low_bits;
uint8_t result = uint8_t(full_item >> start_bit_idx_);
return reinterpret_cast<Element const &>(result);
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference & set(Element const &x) {
uint64_t item = static_cast<uint64_t>((reinterpret_cast<uint8_t const &>(x) & kMask)) << start_bit_idx_;
StorageUnit low_new_bits = StorageUnit(item & ~StorageUnit(0));
StorageUnit high_new_bits = StorageUnit(item >> sizeof_bits<StorageUnit>::value);
StorageUnit const kLowUpdateMask = StorageUnit((~full_element_mask_) & (~StorageUnit(0)));
StorageUnit const kHighUpdateMask = StorageUnit(((~full_element_mask_) >> sizeof_bits<StorageUnit>::value) & (~StorageUnit(0)));
#if defined(__CUDA_ARCH__)
//
// Homebrew read-modify-write
//
if(high_storage_unit_idx_ != low_storage_unit_idx_){
      /// Need to update 2 storage units.
      /// To avoid a misaligned atomic access, perform a separate atomicCAS on each unit.
StorageUnit original_low_bits, original_high_bits, update_low_bits, update_high_bits;
do {
original_low_bits = ((*ptr_)[low_storage_unit_idx_]);
update_low_bits = (original_low_bits & kLowUpdateMask) | low_new_bits;
original_low_bits = atomicCAS(&((*ptr_)[low_storage_unit_idx_]), original_low_bits, update_low_bits);
} while (update_low_bits != original_low_bits);
do {
original_high_bits = ((*ptr_)[high_storage_unit_idx_]);
update_high_bits = (original_high_bits & kHighUpdateMask) | high_new_bits;
original_high_bits = atomicCAS(&((*ptr_)[high_storage_unit_idx_]), original_high_bits, update_high_bits);
} while (update_high_bits != original_high_bits);
}
else {
      /// Only need to update 1 storage unit.
StorageUnit original, updated;
do {
original = ((*ptr_)[low_storage_unit_idx_]);
updated = (original & kLowUpdateMask) | low_new_bits;
original = atomicCAS(&((*ptr_)[low_storage_unit_idx_]), original, updated);
} while (updated != original);
}
#else
StorageUnit update_low_bits = ((*ptr_)[low_storage_unit_idx_] & kLowUpdateMask) | low_new_bits;
StorageUnit update_high_bits = ((*ptr_)[high_storage_unit_idx_] & kHighUpdateMask) | high_new_bits;
(*ptr_)[low_storage_unit_idx_] = update_low_bits;
if(low_storage_unit_idx_ != high_storage_unit_idx_)
(*ptr_)[high_storage_unit_idx_] = update_high_bits;
#endif
return *this;
}
////
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
operator Element() const {
return get();
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(Element const & x) {
return set(x);
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(SubbyteReference const & x) {
return set(x.get());
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(
ConstSubbyteReference<Element, StorageVec> const &x) {
return set(x.get());
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator+=(int offset) {
offset += offset_;
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
update_element_status();
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator+=(long long offset) {
offset += offset_;
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
update_element_status();
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator-=(int offset) {
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
update_element_status();
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator-=(long long offset) {
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
update_element_status();
return *this;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator+(int offset) const {
SubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator+(long long offset) const {
SubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator-(int offset) const {
SubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
  SubbyteReference operator-(long long offset) const {
SubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Computes the difference in elements between references
CUTLASS_HOST_DEVICE
ptrdiff_t operator-(SubbyteReference ref) const {
return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to signed 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator int64_t() const {
return int64_t(get());
}
/// Explicit cast to unsigned 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return uint64_t(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
/// Explicit cast to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(get());
}
};
template <
typename Element_, /// CUTLASS numeric element type.
  typename Storage_ /// Underlying storage type. Must be able to hold an integer number of objects of type Element.
>
class ConstSubbyteReference<Element_, Storage_,
typename platform::enable_if<sizeof_bits<Storage_>::value % sizeof_bits<Element_>::value != 0>::type> {
public:
using Element = Element_;
  ///! Note: the storage unit size may not be divisible by the Element size.
  /// An element may then be stored across 2 storage units, so a storage vector is needed to hold
  /// an integer number of objects of type Element.
using StorageUnit = Storage_;
static int const kBitsStoredVec = cutlass::lcm_cxx11(sizeof_bits<Element>::value, sizeof_bits<StorageUnit>::value);
static int const kNumStorageUnitPerStoredVec = kBitsStoredVec / sizeof_bits<StorageUnit>::value;
using StorageVec = StorageUnit[kNumStorageUnitPerStoredVec];
using StorageVecPointer = StorageVec const *;
using CudaAtomicType = typename platform::conditional<
sizeof_bits<StorageUnit>::value == 16,
uint32_t,
uint64_t
>::type;
static_assert(sizeof_bits<Element>::value <= sizeof_bits<StorageVec>::value,
"Size of Element must not be greater than StorageVec.");
static_assert(!(sizeof_bits<StorageVec>::value % sizeof_bits<Element>::value),
"StorageVec must be divisible by Element");
private:
///! Number of elements per storage vector
int const kElementsPerVector = sizeof_bits<StorageVec>::value / sizeof_bits<Element>::value;
///! Bit mask for storage unit.
StorageUnit const kMask = (StorageUnit(1) << sizeof_bits<Element>::value) - StorageUnit(1);
/// Pointer to array containing element
_war<StorageVecPointer> ptr_;
/// Offset (in units of elements) from pointer.
///
/// Invariant: must always be in range [0, kElementsPerVector)
int offset_;
/// Element may be stored across 2 storage unit.
/// Low storage unit index in StorageVec
/// High storage unit index in StorageVec
int low_storage_unit_idx_;
int high_storage_unit_idx_;
/// Full Mask to extract the entire element
uint64_t full_element_mask_;
/// Mask to extract the Element from Low storage unit and High storage unit.
StorageUnit low_storage_mask_;
StorageUnit high_storage_mask_;
/// Start bit index inside the storage unit.
int start_bit_idx_;
private:
CUTLASS_HOST_DEVICE
void update_element_status() {
int num_bits = offset_ * sizeof_bits<Element>::value;
start_bit_idx_ = num_bits % sizeof_bits<StorageUnit>::value;
low_storage_unit_idx_ = num_bits / sizeof_bits<StorageUnit>::value;
high_storage_unit_idx_ = sizeof_bits<StorageUnit>::value - (start_bit_idx_) < sizeof_bits<Element>::value
? low_storage_unit_idx_ + 1 : low_storage_unit_idx_;
full_element_mask_ = uint64_t(kMask) << start_bit_idx_;
low_storage_mask_ = StorageUnit(full_element_mask_ & ~StorageUnit(0));
high_storage_mask_ = StorageUnit((full_element_mask_ >> sizeof_bits<StorageUnit>::value) & ~StorageUnit(0));
}
public:
CUTLASS_HOST_DEVICE
ConstSubbyteReference(): ptr_(nullptr), offset_(0) { }
/// Constructor
CUTLASS_HOST_DEVICE
ConstSubbyteReference(
Element const *ptr, /// pointer to memory
int64_t offset /// logical offset in units of Element
):
ptr_(reinterpret_cast<StorageVecPointer>(ptr)),
offset_(0) {
int64_t offset_in_vectors = offset / kElementsPerVector;
int64_t offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = int(offset_in_elements);
update_element_status();
}
/// Constructor
CUTLASS_HOST_DEVICE
ConstSubbyteReference(
Element *ptr = nullptr
): ConstSubbyteReference(ptr, 0) { }
/// Gets storage pointer
CUTLASS_HOST_DEVICE
StorageVecPointer storage_pointer() const {
return ptr_;
}
/// Gets element offset within storage vector
CUTLASS_HOST_DEVICE
int element_offset() const {
return offset_;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
Element get() const {
StorageUnit low_bits = (*ptr_)[low_storage_unit_idx_] & low_storage_mask_;
StorageUnit high_bits = low_storage_unit_idx_ != high_storage_unit_idx_ ? (*ptr_)[high_storage_unit_idx_] & high_storage_mask_ : 0;
uint64_t full_item = ((uint64_t)high_bits << sizeof_bits<StorageUnit>::value) | low_bits;
uint8_t result = uint8_t(full_item >> start_bit_idx_);
return reinterpret_cast<Element const &>(result);
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
operator Element() const {
return get();
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator+=(int offset) {
offset += offset_;
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
update_element_status();
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator+=(long long offset) {
offset += offset_;
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
update_element_status();
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator-=(int offset) {
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
update_element_status();
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator-=(long long offset) {
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
update_element_status();
return *this;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator+(int offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator+(long long offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator-(int offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
  ConstSubbyteReference operator-(long long offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Computes the difference in elements between references
CUTLASS_HOST_DEVICE
ptrdiff_t operator-(ConstSubbyteReference ref) const {
return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to signed 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator int64_t() const {
return int64_t(get());
}
/// Explicit cast to unsigned 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return uint64_t(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
/// Explicit cast to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(get());
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, bool subbyte = (sizeof_bits<Element>::value < 8)>
struct ReferenceFactory;
template <typename Element>
struct ReferenceFactory<Element, false> {
///! Number of elements per storage vector
static int const kElementsPerVector = 1;
CUTLASS_HOST_DEVICE
static Element &get(Element *ptr, int64_t offset) {
return ptr[offset];
}
CUTLASS_HOST_DEVICE
static Element const &get(Element const *ptr, int64_t offset) {
return ptr[offset];
}
CUTLASS_HOST_DEVICE
static Element *add_pointer_offset(Element *ptr, int64_t offset) {
return ptr + offset;
}
CUTLASS_HOST_DEVICE
static Element const *add_pointer_offset(Element const *ptr, int64_t offset) {
return ptr + offset;
}
};
template <typename Element>
struct ReferenceFactory<Element, true> {
//
// Static methods
//
CUTLASS_HOST_DEVICE
static SubbyteReference<Element> get(Element *ptr, int64_t offset) {
return SubbyteReference<Element>(ptr, offset);
}
CUTLASS_HOST_DEVICE
static ConstSubbyteReference<Element> get(Element const *ptr,
int64_t offset) {
return ConstSubbyteReference<Element>(ptr, offset);
}
/// Helper to add an offset in number of elements, assuming this offset is divisible
/// by the vector size.
CUTLASS_HOST_DEVICE
static Element *add_pointer_offset(Element *ptr, int64_t offset_in_elements) {
return ptr + offset_in_elements * sizeof_bits<Element>::value / sizeof(Element) / 8;
}
/// Helper to add an offset in number of elements, assuming this offset is divisible
/// by the vector size.
CUTLASS_HOST_DEVICE
static Element const *add_pointer_offset(Element const *ptr, int64_t offset_in_elements) {
return ptr + offset_in_elements * sizeof_bits<Element>::value / sizeof(Element) / 8;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
It can be used to load the gamma and beta vectors of layernorm which is loop variant.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
TensorCoord thread_offset_;
int problem_size_k_;
/// Used for out-of-order visitation
bool is_residue_tile_;
bool guard_;
TensorCoord::Index residue_size_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
problem_size_k_ = problem_size_k;
is_residue_tile_ = true;
residue_size_ = (problem_size_k_ - threadblock_offset.contiguous()) % ThreadblockShape::kContiguous;
if (residue_size_ == 0) {
residue_size_ = ThreadblockShape::kContiguous;
}
guard_ = ((thread_id - thread_base) * kElementsPerAccess) < residue_size_;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
guard_ = threadIdx.x < kThreads * 2;
TensorCoord offset = is_residue_tile_ ?
TensorCoord(residue_size_ + ThreadblockShape::kContiguous * (tile_offset.contiguous() - 1), 0)
: TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
thread_offset_ =
thread_offset_ +
offset;
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
/// Increment and return an instance to self.
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
guard_ &= (!enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return guard_;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Extent of tensor
int problem_size_k,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(problem_size_k, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
int problem_size_k, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
# CuTe Layout Algebra
CuTe provides an "algebra of `Layout`s" to support combining layouts in different ways. This algebra includes operations such as
* `Layout` functional composition,
* a notion of `Layout` "product" to reproduce one layout according to another, and
* a notion of `Layout` "divide" to split one layout according to another.
Common utilities for building complicated layouts from simpler ones depend on the `Layout` product. Common utilities for partitioning layouts (of data, for example) across other layouts (of threads, for example) depend on the `Layout` divide. All of these utilities rely on the functional composition of `Layout`s.
In this section, we'll build up the tools of the `Layout` algebra and explain some of these core operations in detail.
## Coalesce
In the previous section, we summarized `Layout`s with
> Layouts are functions from integers to integers.
The `coalesce` operation is a "simplify" on functions from integers to integers. If we only care about input integers, then we can manipulate the shape and number of modes of the `Layout` without changing it as a function. The only thing `coalesce` can't change is the `Layout`'s `size`.
More specifically, you can find the checked post-conditions in [the `coalesce` unit test](../../../test/unit/cute/core/coalesce.cpp), which we'll reproduce here:
```cpp
// @post size(@a result) == size(@a layout)
// @post depth(@a result) <= 1
// @post for all i, 0 <= i < size(@a layout), @a result(i) == @a layout(i)
Layout coalesce(Layout const& layout)
```
For example,
```cpp
auto layout = Layout<Shape <_2,Shape <_1,_6>>,
Stride<_1,Stride<_6,_2>>>{};
auto result = coalesce(layout); // _12:_1
```
where we can see the result has fewer modes and is "simpler." Indeed, this could save us a few operations in the coordinate mapping and index mapping (if those are performed dynamically).
So, how do we get there?
* We've already seen that column-major `Layout`s like `(_2,_4):(_1,_2)` act identically to `_8:_1` for 1-D coordinates.
* Modes with size static-1 will always produce a natural coordinate of static-0. They can be ignored no matter the stride.
Generalizing, consider a layout with just two integral modes, `s0:d0` and `s1:d1`. Denote the result of coalescing this layout as `s0:d0 ++ s1:d1`. Then, there are four cases:
1. `s0:d0 ++ _1:d1 => s0:d0`. Ignore modes with size static-1.
2. `_1:d0 ++ s1:d1 => s1:d1`. Ignore modes with size static-1.
3. `s0:d0 ++ s1:s0*d0 => s0*s1:d0`. If the second mode's stride is the product of the first mode's size and stride, then they can be combined.
4. `s0:d0 ++ s1:d1 => (s0,s1):(d0,d1)`. Else, nothing can be done and they must be treated separately.
That's it! We can flatten any layout and apply the above binary operation to each pair of adjacent modes in order to "coalesce" the modes of the layout.
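As a quick check of these rules, here is a minimal sketch (using the same layout as the earlier example) that applies them pairwise and confirms the coalesced result:
```cpp
#include <cute/tensor.hpp>
using namespace cute;

// Flattened, the earlier layout is (_2,_1,_6):(_1,_6,_2). Pairwise:
//   _2:_1 ++ _1:_6  =>  _2:_1     (rule 1: the size-1 mode is ignored)
//   _2:_1 ++ _6:_2  =>  _12:_1    (rule 3: stride 2 == size 2 * stride 1)
void coalesce_sketch() {
  auto layout = Layout<Shape <_2,Shape <_1,_6>>,
                       Stride<_1,Stride<_6,_2>>>{};
  print(coalesce(layout));   // _12:_1
}
```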
### By-mode Coalesce
Obviously, sometimes we do care about the shape of our `Layout`, but would still like to coalesce. For example, I have a 2-D `Layout` and I would like the result to remain 2-D.
For this reason, there's an overload of `coalesce` that takes an additional parameter
```cpp
// Apply coalesce at the terminals of trg_profile
Layout coalesce(Layout const& layout, IntTuple const& trg_profile)
```
which can be used as follows
```cpp
auto a = Layout<Shape <_2,Shape <_1,_6>>,
Stride<_1,Stride<_6,_2>>>{};
auto result = coalesce(a, Step<_1,_1>{}); // (_2,_6):(_1,_2)
// Identical to
auto same_r = make_layout(coalesce(layout<0>(a)),
coalesce(layout<1>(a)));
```
This function recurses into `Step<_1,_1>{}` and applies `coalesce` to the corresponding sublayout whenever it sees an integer (the values don't matter, they're just flags) rather than a tuple.
> This theme of defining an operation that treats a `Layout` as a "1-D" function from integers to integers and then generalizing to use it for an arbitrarily shaped layout will be a common one!
## Composition
Functional composition of `Layout`s is the core of CuTe and is used in just about every higher-level operation.
Starting again from the observation that `Layout`s are just functions from integers to integers, we can define functional composition that results in another `Layout`. First, an example.
```text
Functional composition, R := A o B
R(c) := (A o B)(c) := A(B(c))
Example
A = (6,2):(8,2)
B = (4,3):(3,1)
R( 0) = A(B( 0)) = A(B(0,0)) = A( 0) = A(0,0) = 0
R( 1) = A(B( 1)) = A(B(1,0)) = A( 3) = A(3,0) = 24
R( 2) = A(B( 2)) = A(B(2,0)) = A( 6) = A(0,1) = 2
R( 3) = A(B( 3)) = A(B(3,0)) = A( 9) = A(3,1) = 26
R( 4) = A(B( 4)) = A(B(0,1)) = A( 1) = A(1,0) = 8
R( 5) = A(B( 5)) = A(B(1,1)) = A( 4) = A(4,0) = 32
R( 6) = A(B( 6)) = A(B(2,1)) = A( 7) = A(1,1) = 10
R( 7) = A(B( 7)) = A(B(3,1)) = A(10) = A(4,1) = 34
R( 8) = A(B( 8)) = A(B(0,2)) = A( 2) = A(2,0) = 16
R( 9) = A(B( 9)) = A(B(1,2)) = A( 5) = A(5,0) = 40
R(10) = A(B(10)) = A(B(2,2)) = A( 8) = A(2,1) = 18
R(11) = A(B(11)) = A(B(3,2)) = A(11) = A(5,1) = 42
```
The absolutely amazing observation is that the function `R(c) = k` defined above can be written down as another `Layout`
```
R = ((2,2),3):((24,2),8)
```
AND
```
compatible(B, R)
```
That is, every coordinate of `B` can also be used as a coordinate of `R`. This is an expected property of functional composition because `B` defines the *domain* of `R`.
You can find many examples and checked post-conditions in [the `composition` unit test](../../../test/unit/cute/core/composition.cpp). The post-conditions are precisely as we just stated.
```cpp
// @post compatible(@a layout_b, @a result)
// @post for all i, 0 <= i < size(@a layout_b), @a result(i) == @a layout_a(@a layout_b(i)))
Layout composition(LayoutA const& layout_a, LayoutB const& layout_b)
```
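As a sketch, the worked example above can be reproduced directly and the post-conditions checked (all shapes, strides, and the expected result are taken from that example):
```cpp
#include <cassert>
#include <cute/tensor.hpp>
using namespace cute;

void composition_sketch() {
  auto A = make_layout(make_shape(Int<6>{}, Int<2>{}), make_stride(Int<8>{}, Int<2>{}));
  auto B = make_layout(make_shape(Int<4>{}, Int<3>{}), make_stride(Int<3>{}, Int<1>{}));
  auto R = composition(A, B);      // ((_2,_2),_3):((_24,_2),_8)
  for (int i = 0; i < size(B); ++i) {
    assert(R(i) == A(B(i)));       // R(c) == A(B(c)) for every 1-D coordinate of B
  }
}
```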
### Computing Composition
First, a few observations:
* `B = (B_0, B_1, ...)`. A layout can be expressed as the concatenation of its sublayouts.
* `A o B = A o (B_0, B_1, ...) = (A o B_0, A o B_1, ...)`. When `B` is injective, composition is left-distributive with concatenation.
With the above, we can assume without loss of generality that `B = s:d` is a layout with integral shape and stride. We can also assume that `A` is a flattened, coalesced layout.
When `A` is integral, `A = a:b`, the result is rather trivial: `R = A o B = a:b o s:d = s:(b*d)`. But when `A` is multimodal, we need to be more careful.
Put into words, `A o B = A o s:d`, for integral `s` and `d`, means that we want to (1) take every `d`th element of `A` and then (2) keep the first `s` of those strided elements.
1. Every `d`th element of `A` can be computed by "dividing out" the first `d` elements from the shape of `A`. For an array of integers representing the shape, this is computed as
```cpp
void shape_div(int* shapeA, int N, int& strideB) {
for (int i = 0; i < N; ++i) {
assert(shapeA[i] % strideB == 0 or
strideB % shapeA[i] == 0);
int new_shape = ceil_div(shapeA[i], strideB);
int new_stride = ceil_div(strideB, shapeA[i]);
shapeA[i] = new_shape;
strideB = new_stride;
}
}
```
which progressively "removes" the first `strideB` elements from `shapeA` starting from the left. For example,
* `(6,2) / 2 => (3,2)`
* `(6,2) / 3 => (2,2)`
* `(6,2) / 6 => (1,2)`
* `(6,2) / 12 => (1,1)`
* `(3,6,2,8) / 6 => (1,3,2,8)`
* `(3,6,2,8) / 9 => (1,2,2,8)`
* `(42,16,3) / 2 => (21,16,3)`
* `(42,16,3) / 6 => ( 7,16,3)`
As you may have noticed, we can only divide shapes by certain values and get a sensible result. This is called the **divisibility condition** and is enforced by the `assert` in the above code and statically checked in CuTe when possible.
2. The first `s` elements of the strided `A` layout can be computed by "modding out" the first `s` elements from the shape of `A`. For an array of integers representing the shape, this is computed as
```cpp
void shape_mod(int* shapeA, int N, int& shapeB) {
for (int i = 0; i < N; ++i) {
assert(shapeA[i] % shapeB == 0 or
shapeB % shapeA[i] == 0);
int new_shapeA = min(shapeA[i], shapeB);
int new_shapeB = ceil_div(shapeB, shapeA[i]);
shapeA[i] = new_shapeA;
shapeB = new_shapeB;
}
}
```
which progressively "keeps" the first `shapeB` elements from `shapeA` starting from the left. For example,
* `(6,2) % 2 => (2,1)`
* `(6,2) % 3 => (3,1)`
* `(6,2) % 6 => (6,1)`
* `(6,2) % 12 => (6,2)`
* `(3,6,2,8) % 6 => (3,2,1,1)`
* `(3,6,2,8) % 9 => (3,3,1,1)`
* `(1,2,2,8) % 2 => (1,2,1,1)`
* `(1,2,2,8) % 16 => (1,2,2,4)`
Again, this operation must satisfy the divisibility condition to yield a sensible result. This is enforced by the `assert` in the above code and statically checked in CuTe when possible.
Clearly, CuTe does not use arrays to store shapes or strides and the above code is for explication only. CuTe works with shapes and strides as `IntTuple`s and the implementation is expressed as algorithmic `fold`s which carefully account for static and dynamic integers.
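CuTe exposes helpers along these lines that operate directly on `IntTuple`s; a minimal sketch (assuming the helper is `cute::shape_div`, mirroring the array version above):
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void shape_div_sketch() {
  // (6,2) / 3 => (2,2), matching the array-based example above
  print(shape_div(make_shape(Int<6>{}, Int<2>{}), Int<3>{}));   // (_2,_2)
}
```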
#### Example 1 -- Reshape a layout into a matrix
`20:2 o (5,4):(4,1)`. Composition formulation.
This describes interpreting the layout `20:2`
as a 5x4 matrix in a row-major order.
1. ` = 20:2 o (5:4,4:1)`. Layout `(5,4):(4,1)` as concatenation of sublayouts.
2. ` = (20:2 o 5:4, 20:2 o 4:1)`. Left distributivity.
* `20:2 o 5:4 => 5:8`. Trivial case.
* `20:2 o 4:1 => 4:2`. Trivial case.
3. ` = (5:8, 4:2)`. Composed Layout as concatenation of sublayouts.
4. ` = (5,4):(8,2)`. Final composed layout.
#### Example 2 -- Reshape a layout into a matrix
`(10,2):(16,4) o (5,4):(1,5)`
This describes interpreting the layout `(10,2):(16,4)`
as a 5x4 matrix in a column-major order.
1. ` = (10,2):(16,4) o (5:1,4:5)`. Layout `(5,4):(1,5)` as concatenation of sublayouts.
2. ` = ((10,2):(16,4) o 5:1, (10,2):(16,4) o 4:5)`. Left distributivity.
* `(10,2):(16,4) o 5:1 => (5,1):(16,4)`. Mod out the shape `5`.
* `(10,2):(16,4) o 4:5 => (2,2):(80,4)`. Div out the stride `5`.
3. ` = ((5,1):(16,4), (2,2):(80,4))`. Composed Layout as concatenation of sublayouts.
4. ` = (5:16, (2,2):(80,4))`. By-mode coalesce.
5. ` = (5,(2,2)):(16,(80,4))`. Final composed layout.
We get exactly this result with CuTe
if we use compile-time shapes and strides.
The following C++ code prints `(_5,(_2,_2)):(_16,(_80,_4))`.
```cpp
Layout a = make_layout(make_shape (Int<10>{}, Int<2>{}),
make_stride(Int<16>{}, Int<4>{}));
Layout b = make_layout(make_shape (Int< 5>{}, Int<4>{}),
make_stride(Int< 1>{}, Int<5>{}));
Layout c = composition(a, b);
print(c);
```
If we use dynamic integers, the following C++ code prints `((5,1),(2,2)):((16,4),(80,4))`.
```cpp
Layout a = make_layout(make_shape (10, 2),
make_stride(16, 4));
Layout b = make_layout(make_shape ( 5, 4),
make_stride( 1, 5));
Layout c = composition(a, b);
print(c);
```
The results may _look_ different but are mathematically the same. The 1s in the shape don't affect the layout as a mathematical function from 1-D coordinates to integers or as a function from 2-D coordinates to integers. In the dynamic case, CuTe cannot coalesce the dynamic size-1 modes to "simplify" the layout due to the static rank and type of the tuples containing them.
### By-mode Composition
Similar to by-mode `coalesce` and building up to a generic tiling operation, sometimes we do care about the shape of the `A` layout and would still like to apply `composition` to individual modes. For example, I have a 2-D `Layout` and would like some sublayout of the elements down the columns and another sublayout of elements across the rows.
For this reason, `composition` also works when its second parameter -- the `B` -- is a `Tiler`. In general, a tiler is a layout or a tuple-of-layouts (note the generalization on `IntTuple`), which can be used as follows
```cpp
// (12,(4,8)):(59,(13,1))
auto a = make_layout(make_shape (12,make_shape ( 4,8)),
make_stride(59,make_stride(13,1)));
// <3:4, 8:2>
auto tiler = make_tile(Layout<_3,_4>{}, // Apply 3:4 to mode-0
Layout<_8,_2>{}); // Apply 8:2 to mode-1
// (_3,(2,4)):(236,(26,1))
auto result = composition(a, tiler);
// Identical to
auto same_r = make_layout(composition(layout<0>(a), get<0>(tiler)),
composition(layout<1>(a), get<1>(tiler)));
```
We often use the `<LayoutA, LayoutB, ...>` notation to distinguish `Tiler`s from the concatenation-of-sublayouts notation `(LayoutA, LayoutB, ...)` that we used previously.
The `result` in the above code can be depicted as the 3x8 sublayout of the original layout highlighted in the figure below.
<p align="center">
<img src="../../images/cute/composition1.png" alt="composition1.png" height="250"/>
</p>
For convenience, CuTe also accepts a `Shape` as a tiler. A `Shape` is interpreted as a tuple-of-layouts-with-stride-1:
```cpp
// (12,(4,8)):(59,(13,1))
auto a = make_layout(make_shape (12,make_shape ( 4,8)),
make_stride(59,make_stride(13,1)));
// (_3, _8)
auto tiler = make_shape(Int<3>{}, Int<8>{});
// Equivalent to <3:1, 8:1>
// auto tiler = make_tile(Layout<_3,_1>{}, // Apply 3:1 to mode-0
// Layout<_8,_1>{}); // Apply 8:1 to mode-1
// (_3,(4,2)):(59,(13,1))
auto result = composition(a, tiler);
```
where `result` can be depicted as the 3x8 sublayout of the original layout highlighted in the figure below.
<p align="center">
<img src="../../images/cute/composition2.png" alt="composition2.png" height="250"/>
</p>
## Composition Tilers
In summary, a `Tiler` is one of the following objects.
1. A `Layout`.
2. A tuple of `Tiler`s.
3. A `Shape`, which will be interpreted as a tiler of `Layout`s with stride-1.
Any of the above can be used as the second argument in `composition`. With (1), we think of the `composition` as between two functions from integers to integers, no matter the ranks of the layouts. With (2) and (3), the `composition` is performed on each pair of corresponding modes of `A` and `B`, until case (1) is found.
This allows composition to be applied by-mode to retrieve arbitrary sublayouts of specified modes of a tensor ("Give me the 3x5x8 subblock of this MxNxL tensor") but also allows entire tiles of data to be reshaped and reordered as if they were 1-D vectors ("Reorder this 8x16 block of data into a 32x4 block using this weird order of elements"). We will see the by-mode cases appear often when we are tiling for threadblocks in examples that follow. We will see 1-D reshaping and reordering when we want to apply arbitrary partitioning patterns for threads and values in MMAs in examples that follow.
## Complement
Before getting to "product" and "divide," we need one more operation. We can think of `composition` as a layout `B` that is "selecting" certain coordinates from another layout `A`. But what about the coordinates that aren't "selected"? To implement generic tiling, we want to be able to select arbitrary elements -- the tile -- and to describe the layout of those tiles -- the leftovers, or the "rest."
The `complement` of a layout attempts to find another layout that represents the "rest" -- the elements that aren't touched by the layout.
You can find many examples and checked post-conditions in [the `complement` unit test](../../../test/unit/cute/core/complement.cpp). The post-conditions include
```cpp
// @post cosize(make_layout(@a layout_a, @a result))) >= size(@a cotarget)
// @post cosize(@a result) >= round_up(size(@a cotarget), cosize(@a layout_a))
// @post for all i, 1 <= i < size(@a result),
// @a result(i-1) < @a result(i)
// @post for all i, 1 <= i < size(@a result),
// for all j, 0 <= j < size(@a layout_a),
// @a result(i) != @a layout_a(j)
Layout complement(LayoutA const& layout_a, Shape const& cotarget)
```
That is, the complement `R` of a layout `A` with respect to a Shape (IntTuple) `M` satisfies the following properties.
1. The size (and cosize) of `R` is *bounded* by `size(M)`.
2. `R` is *ordered*. That is, the strides of `R` are positive and increasing. This means that `R` is unique.
3. `A` and `R` have *disjoint* codomains. `R` attempts to "complete" the codomain of `A`.
The `cotarget` parameter above is most commonly an integer -- you can see we only use `size(cotarget)` above. However, sometimes it is useful to specify an integer that has static properties. For example, `28` is a dynamic integer and `(_4,7)` is a shape with size `28` that is statically known to be divisible by `_4`. Both will produce the same `complement` mathematically, but the extra information can used by `complement` to preserve the staticness of the result as much as possible.
### Complement Examples
`complement` is most effective on static shapes and strides, so consider all integers below to be static. Similar examples for dynamic shapes and strides as well as IntTuple `cotarget` can be found in [the unit test](../../../test/unit/cute/core/complement.cpp).
* `complement(4:1, 24)` is `6:4`. Note that `(4,6):(1,4)` has cosize `24`. The layout `4:1` is effectively repeated 6 times with `6:4`.
* `complement(6:4, 24)` is `4:1`. Note that `(6,4):(4,1)` has cosize `24`. The "hole" in `6:4` is filled with `4:1`.
* `complement((4,6):(1,4), 24)` is `1:0`. Nothing needs to be appended.
* `complement(4:2, 24)` is `(2,3):(1,8)`. Note that `(4,(2,3)):(2,(1,8))` has cosize `24`. The "hole" in `4:2` is filled with `2:1` first, then everything is repeated 3 times with `3:8`.
* `complement((2,4):(1,6), 24)` is `3:2`. Note that `((2,4),3):((1,6),2)` has cosize `24` and produces unique indices.
* `complement((2,2):(1,6), 24)` is `(3,2):(2,12)`. Note that `((2,2),(3,2)):((1,6),(2,12))` has cosize `24` and produces unique indices.
<p align="center">
<img src="../../images/cute/complement1.png" alt="complement1.png" height="75"/>
</p>
As a visualization, the above figure depicts the codomain of the last example. The image of the original layout `(2,2):(1,6)` is colored in gray. The complement effectively "repeats" the original layout (displayed in the other colors) such that the codomain size of the result is `24`. The complement `(3,2):(2,12)` can be viewed as the "layout of the repetition."
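A minimal sketch (with static integers) that checks the `complement(4:2, 24)` example above and its concatenation with the original layout:
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void complement_sketch() {
  auto a      = make_layout(Int<4>{}, Int<2>{});   // 4:2
  auto a_star = complement(a, Int<24>{});          // (_2,_3):(_1,_8)
  print(a_star);
  print(make_layout(a, a_star));                   // (_4,(_2,_3)):(_2,(_1,_8)) -- cosize 24
}
```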
## Division (Tiling)
Finally, we can define the division of a `Layout` by another `Layout`. Functions that divide a layout into components are useful as a basis for tiling and partitioning layouts.
In this section, we'll define `logical_divide(Layout, Layout)`, which again considers all `Layout`s as 1-D functions from integers to integers, and then use that definition to create multidimensional `Layout` divides.
Informally, `logical_divide(A, B)` splits a layout `A` into two modes -- in the first mode are all elements pointed to by `B` and in the second mode are all elements not pointed to by `B`.
Formally, this can be written as
$A \oslash B := A \circ (B,B^*)$
and implemented as
```cpp
template <class LShape, class LStride,
class TShape, class TStride>
auto logical_divide(Layout<LShape,LStride> const& layout,
Layout<TShape,TStride> const& tiler)
{
return composition(layout, make_layout(tiler, complement(tiler, size(layout))));
}
```
Note that this is defined only in terms of concatenation, composition, and complement.
So what is that?
> in the first mode are all elements pointed to by `B`
This is clearly composition, `A o B`.
> in the second mode are all elements not pointed to by `B`
The elements NOT pointed to by `B` sounds like a complement, `B*`, up to the size of `A`. As we've seen above in the `complement` section, this can be described as the "layout of the repetition of `B`." If `B` is the "tiler", then `B*` is the layout of the tiles.
### Logical Divide 1-D Example
Consider tiling the 1-D layout `A = (4,2,3):(2,1,8)` with the tiler `B = 4:2`. Informally, this means that we have a 1-D vector of 24 elements in some storage order defined by `A` and we want to extract tiles of 4 elements strided by 2.
This is computed in the three steps described in the implementation above.
* Complement of `B = 4:2` under `size(A) = 24` is `B* = (2,3):(1,8)`.
* Concatenation of `(B,B*) = (4,(2,3)):(2,(1,8))`.
* Composition of `A = (4,2,3):(2,1,8)` with `(B,B*)` is then `((2,2),(2,3)):((4,1),(2,8))`.
<p align="center">
<img src="../../images/cute/divide1.png" alt="divide1.png" height="150"/>
</p>
The above figure depicts `A` as a 1-D layout with the elements pointed to by `B` highlighted in gray. The layout `B` describes our "tile" of data, and there are six of those tiles in `A` shown by each of the colors. After the divide, the first mode of the result is the tile of data and the second mode of the result iterates over each tile.
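The same result can be reproduced directly; a minimal sketch with static integers:
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void divide_1d_sketch() {
  auto A = make_layout(make_shape (Int<4>{}, Int<2>{}, Int<3>{}),
                       make_stride(Int<2>{}, Int<1>{}, Int<8>{}));
  auto B = make_layout(Int<4>{}, Int<2>{});      // the tiler 4:2
  print(logical_divide(A, B));                   // ((_2,_2),(_2,_3)):((_4,_1),(_2,_8))
}
```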
### Logical Divide 2-D Example
Using the `Tiler` concept defined above, this immediately generalizes to multidimensional tiling. The below example simply applies `logical_divide` by-mode to the cols and rows of a 2-D layout using a `Tiler`.
Similar to the 2-D composition example above, consider a 2-D layout `A = (9,(4,8)):(59,(13,1))` and suppose we want to apply `3:3` down the columns (mode-0) and `(2,4):(1,8)` across the rows (mode-1). This means the tiler can be written as `B = <3:3, (2,4):(1,8)>`.
<p align="center">
<img src="../../images/cute/divide2.png" alt="divide2.png" height="450"/>
</p>
The above figure depicts `A` as a 2-D layout with the elements pointed to by `B` highlighted in gray. The layout `B` describes our "tile" of data, and there are twelve of those tiles in `A` shown by each of the colors. After the divide, the first mode of each mode of the result is the tile of data and the second mode of each mode iterates over each tile. In that sense, this operation can be viewed as a kind of `gather` operation or as simply a permutation on the rows and cols.
Note that the first mode of each mode of the result is the sublayout `(3,(2,4)):(177,(13,2))` and is precisely the result we would have received if we had applied `composition` instead of `logical_divide`.
### Zipped, Tiled, Flat Divides
It's easy to see the tiles when they are highlighted in the images above, but working with them can still be awkward. How would you slice out the `3`rd tile or the `7`th tile or the `(1,2)`th tile so you could continue working on it?
Enter the convenience flavors of `logical_divide`. Suppose we have a `Layout` and a `Tiler` of some shape, then each operation will apply `logical_divide`, but potentially rearrange the modes into more convenient forms.
```text
Layout Shape : (M, N, L, ...)
Tiler Shape : <TileM, TileN>
logical_divide : ((TileM,RestM), (TileN,RestN), L, ...)
zipped_divide : ((TileM,TileN), (RestM,RestN,L,...))
tiled_divide : ((TileM,TileN), RestM, RestN, L, ...)
flat_divide : (TileM, TileN, RestM, RestN, L, ...)
```
For example, the `zipped_divide` function applies `logical_divide`, and then gathers the "subtiles" into a single mode and the "rest" into a single mode.
```cpp
// A: shape is (9,32)
auto layout_a = make_layout(make_shape (Int< 9>{}, make_shape (Int< 4>{}, Int<8>{})),
make_stride(Int<59>{}, make_stride(Int<13>{}, Int<1>{})));
// B: shape is (3,8)
auto tiler = make_tile(Layout<_3,_3>{}, // Apply 3:3 to mode-0
Layout<Shape <_2,_4>, // Apply (2,4):(1,8) to mode-1
Stride<_1,_8>>{});
// ((TileM,RestM), (TileN,RestN)) with shape ((3,3), (8,4))
auto ld = logical_divide(layout_a, tiler);
// ((TileM,TileN), (RestM,RestN)) with shape ((3,8), (3,4))
auto zd = zipped_divide(layout_a, tiler);
```
Then, the offset to the `3`rd tile is `zd(0,3)`. The offset to the `7`th tile is `zd(0,7)`. The offset to the `(1,2)`th tile is `zd(0,make_coord(1,2))`. The tile itself always has layout `layout<0>(zd)`. Indeed, it is always the case that
`layout<0>(zipped_divide(a, b)) == composition(a, b)`.
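A small self-contained sketch of reading those tile offsets (same layout and tiler as the `zipped_divide` example above):
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void tile_offsets_sketch() {
  auto layout_a = make_layout(make_shape (Int< 9>{}, make_shape (Int< 4>{}, Int<8>{})),
                              make_stride(Int<59>{}, make_stride(Int<13>{}, Int<1>{})));
  auto tiler    = make_tile(Layout<_3,_3>{},
                            Layout<Shape <_2,_4>, Stride<_1,_8>>{});
  auto zd = zipped_divide(layout_a, tiler);   // ((TileM,TileN), (RestM,RestN))

  print(zd(0, 3));                  // offset of the 3rd tile
  print(zd(0, make_coord(1, 2)));   // offset of the (1,2)th tile
  print(layout<0>(zd));             // the tile itself == composition(layout_a, tiler)
}
```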
We note that `logical_divide` preserves the *semantics* of the modes while permuting the elements within those modes -- the `M`-mode of layout `A` is still the `M`-mode of the result and the `N`-mode of layout `A` is still the `N`-mode of the result.
This is not the case with `zipped_divide`. The mode-0 in the `zipped_divide` result is the `Tile` itself (of whatever rank the `Tiler` was) and mode-1 is the layout of those tiles. It doesn't always make sense to plot these as 2-D layouts, because the `M`-mode is now more aptly the "tile-mode" and the `N`-mode is more aptly the "rest-mode". Regardless, we still can plot the resulting layout as 2-D as shown below.
<p align="center">
<img src="../../images/cute/divide3.png" alt="divide3.png" height="450"/>
</p>
We've kept the color of each tile from the previous images for clarity. Clearly, iterating across tiles is now equivalent to iterating across a row of this layout and iterating over elements within a tile is equivalent to iterating down a column of this layout. As we'll see in the `Tensor` section, this can be used to great effect in partitioning within or across tiles of data.
## Product (Tiling)
Finally, we can define the product of a Layout by another Layout. In this section, we'll define `logical_product(Layout, Layout)`, which again considers all `Layout`s as 1-D functions from integers to integers, and then use that definition to create multidimensional `Layout` products.
Informally, `logical_product(A, B)` results in a two-mode layout where the first mode is the layout `A` and the second mode is the layout `B`, but with each element replaced by a "unique replication" of layout `A`.
Formally, this can be written as
$A \otimes B := (A, A^* \circ B)$
and implemented in CuTe as
```cpp
template <class LShape, class LStride,
class TShape, class TStride>
auto logical_product(Layout<LShape,LStride> const& layout,
Layout<TShape,TStride> const& tiler)
{
return make_layout(layout, composition(complement(layout, size(layout)*cosize(tiler)), tiler));
}
```
Note that this is defined only in terms of concatenation, composition, and complement.
So what is that?
> where the first mode is the layout `A`
This is clearly just a copy of `A`.
> the second mode is the layout `B` but with each element replaced by a "unique replication" of layout `A`.
The "unique replication" of layout `A` sounds like complement, `A*`, up to the cosize of `B`. As we've seen in the `complement` section, this can be described as the "layout of the repetition of `A`". If `A` is the "tile", then `A*` is the layout of repetitions that are available for `B`.
### Logical Product 1-D Example
Consider reproducing the 1-D layout `A = (2,2):(4,1)` according to `B = 6:1`. Informally, this means that we have a 1-D layout of 4 elements defined by `A` and we want to reproduce it 6 times.
This is computed in the three steps described in the implementation above.
* Complement of `A = (2,2):(4,1)` under `6*4 = 24` is `A* = (2,3):(2,8)`.
* Composition of `A* = (2,3):(2,8)` with `B = 6:1` is then `(2,3):(2,8)`.
* Concatenation of `(A,A* o B) = ((2,2),(2,3)):((4,1),(2,8))`.
<p align="center">
<img src="../../images/cute/product1.png" alt="product1.png" height="175"/>
</p>
The above figure depicts `A` and `B` as a 1-D layouts. The layout `B` describes the number and order of repetitions of `A` and they are colored for clarity. After the product, the first mode of the result is the tile of data and the second mode of the result iterates over each tile.
Note that the result is identical to the result of the 1-D Logical Divide example.
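A minimal sketch of that 1-D product (static integers assumed), which indeed prints the same layout as the divide example:
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void product_1d_sketch() {
  auto A = make_layout(make_shape (Int<2>{}, Int<2>{}),
                       make_stride(Int<4>{}, Int<1>{}));
  auto B = make_layout(Int<6>{}, Int<1>{});      // 6:1
  print(logical_product(A, B));                  // ((_2,_2),(_2,_3)):((_4,_1),(_2,_8))
}
```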
Of course, we can change the number and order of the tiles in the product by changing `B`.
<p align="center">
<img src="../../images/cute/product2.png" alt="product2.png" height="175"/>
</p>
For example, in the above image with `B = (4,2):(2,1)`, there are 8 repeated tiles instead of 6 and the tiles are in a different order.
### Logical Product 2-D Example
We can use the by-mode `tiler` strategies previously developed to write multidimensional products as well.
<p align="center">
<img src="../../images/cute/product2d.png" alt="product2d.png" height="250"/>
</p>
The above image demonstrates the use of a `tiler` to apply `logical_product` by-mode. Despite this **not being the recommended approach**, the result is a rank-2 layout consisting of a 2x5 row-major block that is tiled across a 3x4 column-major arrangement.
The reason **this is not the recommended approach** is that the `tiler B` in the above expression is highly unintuitive. In fact, it requires perfect knowledge of the shape and strides of `A` in order to construct. We would like to express "Tile Layout `A` according to Layout `B`" in a way that makes `A` and `B` independent and is much more intuitive.
#### Blocked and Raked Products
The `blocked_product(LayoutA, LayoutB)` and `raked_product(LayoutA, LayoutB)` are rank-sensitive transformations on top of 1-D `logical_product` that let us express the more intuitive `Layout` products that we most often want to express.
A key observation in the implementation of these functions is the pair of compatibility post-conditions of `logical_product`:
```
// @post rank(result) == 2
// @post compatible(layout_a, layout<0>(result))
// @post compatible(layout_b, layout<1>(result))
```
Because `A` is always compatible with mode-0 of the result and `B` is always compatible with mode-1 of the result, if we made `A` and `B` the same rank then we could "reassociate" like-modes after the product. That is, the "column" mode in `A` could be combined with the "column" mode in `B` and the "row" mode in `A` could be combined with the "row" mode in `B`, etc.
This is exactly what `blocked_product` and `raked_product` do and it is why they are called rank-sensitive. Unlike other CuTe functions that take `Layout` arguments, these care about the top-level rank of the arguments so that each mode can be reassociated after the `logical_product`.
<p align="center">
<img src="../../images/cute/productblocked2d.png" alt="productblocked2d.png" height="250"/>
</p>
The above image shows the same result as the `tiler` approach, but with much more intuitive arguments. A 2x5 row-major layout is arranged as a tile in a 3x4 column-major arrangement. Also note that `blocked_product` went ahead and `coalesced` mode-0 for us.
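As a sketch (the tile and arrangement shapes below are taken from the figure's description; the exact printed strides depend on the layouts chosen):
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void blocked_product_sketch() {
  auto tile   = make_layout(make_shape (Int<2>{}, Int<5>{}),
                            make_stride(Int<5>{}, Int<1>{}));   // 2x5 row-major tile
  auto matrix = make_layout(make_shape (Int<3>{}, Int<4>{}));   // 3x4 column-major arrangement
  print(blocked_product(tile, matrix));   // a 6x20 layout; mode-0 is coalesced as noted above
}
```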
Similarly, `raked_product` combines the modes slightly differently. Instead of the resulting "column" mode being constructed from the `A` "column" mode then the `B` "column" mode, the resulting "column" mode is constructed from the `B` "column" mode then the `A` "column" mode.
<p align="center">
<img src="../../images/cute/productraked2d.png" alt="productraked2d.png" height="250"/>
</p>
This results in the "tile" `A` now being interleaved or "raked" with the "layout-of-tiles" `B` instead of appearing as blocks. Other references call this a "cyclic distribution."
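A corresponding sketch for the raked version, reusing the same tile and arrangement:
```cpp
#include <cute/tensor.hpp>
using namespace cute;

void raked_product_sketch() {
  auto tile   = make_layout(make_shape (Int<2>{}, Int<5>{}),
                            make_stride(Int<5>{}, Int<1>{}));   // 2x5 row-major tile
  auto matrix = make_layout(make_shape (Int<3>{}, Int<4>{}));   // 3x4 column-major arrangement
  print(raked_product(tile, matrix));   // 6x20 again, with the tile interleaved ("raked") across the arrangement
}
```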
### Zipped and Tiled Products
Similar to `zipped_divide` and `tiled_divide`, the `zipped_product` and `tiled_product` simply rearrange the modes that result from a by-mode `logical_product`.
```text
Layout Shape : (M, N, L, ...)
Tiler Shape : <TileM, TileN>
logical_product : ((M,TileM), (N,TileN), L, ...)
zipped_product : ((M,N), (TileM,TileN,L,...))
tiled_product : ((M,N), TileM, TileN, L, ...)
flat_product : (M, N, TileM, TileN, L, ...)
```
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Implicit GEMM API")
[README](../../README.md#documentation) > **Implicit GEMM Convolution**
# CUTLASS Convolution
Implicit GEMM is the formulation of a convolution operation as a GEMM (generalized matrix-matrix
product). Convolution takes an activation tensor and applies a sliding filter on it to produce an
output tensor.
## Introduction
This release of CUTLASS contains several artifacts related to convolution.
- [**Implicit GEMM Algorithm**](implicit_gemm_convolution.md#implicit-gemm-algorithm)
- [**CUTLASS Convolution Implementation**](implicit_gemm_convolution.md#cutlass-convolution-implementation)
- [**Convolution Examples**](implicit_gemm_convolution.md#convolution-example)
# Implicit GEMM Algorithm
2-D convolution may be mapped to matrix multiply
by first forming a _convolution matrix_ containing elements of the activations tensor,
then multiplying this by a matrix formed from the filters tensor.
The earliest form of this algorithm constructs the convolution matrix explicitly via an operation
conventionally referred to as `im2col`. The resulting matrix replicates each activation element by a factor
equal to the filter size, consuming additional storage capacity and memory bandwidth.
The _implicit GEMM_ algorithm is a variation on the blocked, hierarchical GEMM computation in CUDA.
Instead of constructing the convolution matrix explicitly,
it forms tiles of the convolution matrix on the fly
as data are loaded from global memory into Shared Memory
by carefully updating pointers and predicates.
Once the convolution matrix is formed in Shared Memory,
the existing warp-level GEMM components accumulate the result of
convolution and update the output tensor.
This section describes the structure of an efficient Implicit GEMM Convolution CUDA kernel
for Turing Tensor Cores.
## Mapping Convolution to GEMM
The forward convolutional layer computes an output tensor _y = conv(x, w)_ where x(NHWC), w(KRSC), and y(NPQK)
are 4-D tensors.
This computation may be described by the following analytic function.
```
y[n, p, q, k] = sum_c(sum_r(sum_s( x[n, f(p, r), g(q, s), c] * w[k, r, s, c] )))
```
where functions _f_ and _g_ are defined as follows.
```
f(p, r) = p * stride_h + R - r - 1 + pad_h
g(q, s) = q * stride_w + S - s - 1 + pad_w
```
A [host](/tools/util/include/cutlass/util/reference/host/convolution.h) and [device](/tools/util/include/cutlass/util/reference/device/convolution.h)
reference implementation are provided in the CUTLASS Utilities.
This computation may be mapped to the elements of a matrix product as follows.
```
C = gemm(A, B)
```
where
- A is a row-major matrix of extent _NHW_-by-_RSC_ containing activations
- B is a column-major matrix of extent _RSC_-by-_K_ containing filters
- C is a row-major matrix of extent _NPQ_-by-_K_ containing the output
Each element of the output matrix _Cij_ corresponds to an element in the output tensor y[n, p, q, k] according to
the following relation.
```
y[n, p, q, k] = Cij
```
where
```
i = q + Q * (p + P * n)
j = k
```
These relations may be inverted as follows.
```
k = j
n = i / (PQ)
residual = i % (PQ)
p = residual / Q
q = residual % Q
```
The triple loop nest iterating over CRS to accumulate the result may also be linearized and mapped to the inner
GEMM _K_ dimension (not to be confused with the filter tensor dimension _K_) by the following relations.
```
gemm_k = s + S * (r + R * c)
```
and inverse
```
c = gemm_k / (RS)
residual = gemm_k % (RS)
r = residual / S
s = residual % S
```
Given these equations, a GEMM triple loop nest could be augmented with tensor indexing as follows.
```c++
int GEMM_M = N * P * Q;
int GEMM_N = K;
int GEMM_K = C * R * S;
for (int gemm_i = 0; gemm_i < GEMM_M; ++gemm_i) {
for (int gemm_j = 0; gemm_j < GEMM_N; ++gemm_j) {
int n = gemm_i / (PQ);
int npq_residual = gemm_i % (PQ);
int p = npq_residual / Q;
int q = npq_residual % Q;
Accumulator accum = 0;
for (int gemm_k = 0; gemm_k < GEMM_K; ++gemm_k) {
int k = gemm_j;
int c = gemm_k / (RS);
int crs_residual = gemm_k % (RS);
int r = crs_residual / S;
int s = crs_residual % S;
int h = f(p, r);
int w = g(q, s);
ElementA a = tensor_A.at({n, h, w, c});
ElementB b = tensor_B.at({k, r, s, c});
accum += a * b;
}
C[gemm_i * K + gemm_j] = accum;
}
}
```
The [CUTLASS GEMM implementation](/media/docs/efficient_gemm.md) explicitly iterates over tiles. Consequently,
a tile iterator could be implemented to compute these functions analytically and load the appropriate
elements. However, the resulting modulo arithmetic would be computationally intensive, and overhead would
limit performance of a GEMM kernel targeting Turing Tensor Cores.
The following section describes how an efficient implementation may be implemented within the structure of
a hierarchical GEMM kernel targeting Tensor Cores.
# CUTLASS Convolution Implementation
To get the best performance, the following parameters are recommended.
- All tensors are 128-bit aligned NHWC tensors
- Channel count (C) is a multiple of 32 elements
- Filter count (K) is a multiple of 32 elements
This enables 128-bit vector memory accesses which lead to efficient CUDA kernels. Smaller alignment is supported even on tensor cores by setting AlignmentA and AlignmentB in `conv::kernel::DefaultConv2dFprop`, but the performance is lower than with 128-bit aligned tensors.
# CUTLASS Device-level Convolution Operator
CUTLASS defines CUDA C++ templates accepting numerous template arguments to specialize the resulting
kernel by operation, data type, tile configuration, math instruction, and fused output operation.
In [turing_tensorop_conv2dfprop.cu](/examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu), a convolution
operation is defined as follows.
```c++
/// Define an Implicit GEMM convolution forward propagation (fprop) kernel
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, // data type of element a (mapped to activation for fprop)
LayoutInputA, // layout of element a (mapped to activation for fprop)
ElementInputB, // data type of element b (mapped to filters for fprop)
LayoutInputB, // layout of element b (mapped to filters for fprop)
ElementC, // data type of element c (mapped to output for fprop)
LayoutC, // layout of element c (mapped to output for fprop)
ElementAccumulator, // data type of internal accumulation
MMAOp, // opcode class tag
SmArch, // target SM architecture
ThreadblockShape, // shape of threadblock tile
WarpShape, // shape of warp-level GEMM tile
InstructionShape, // shape of target math instruction
EpilogueOp, // epilogue operator
SwizzleThreadBlock, // optional function to reorder threadblocks for locality
NumStages, // number of pipeline stages in threadblock-scoped GEMM
cutlass::arch::OpMultiplyAddSaturate, // math operation on data of element a and b
cutlass::conv::IteratorAlgorithm::kOptimized // global memory iterator algorithm
>::Kernel
```
This template is intended to be generic and cover all feasible configurations. The example specifies
the following concrete data types, layouts, and tile shapes.
```c++
/// Define an Implicit GEMM convolution forward propagation (fprop) kernel
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
cutlass::int4b_t, // data type of element a (mapped to activation for fprop)
cutlass::layout::TensorNHWC, // layout of element a (mapped to activation for fprop)
cutlass::int4b_t, // data type of element b (mapped to filters for fprop)
cutlass::layout::TensorNHWC, // layout of element b (mapped to filters for fprop)
int32_t, // data type of element c (mapped to output for fprop)
cutlass::layout::TensorNHWC, // layout of element c (mapped to output for fprop)
int32_t, // data type of internal accumulation
cutlass::arch::OpClassTensorOp, // opcode class tag
cutlass::arch::Sm75, // target SM architecture
cutlass::gemm::GemmShape<128, 128, 128>, // shape of threadblock tile
cutlass::gemm::GemmShape<64, 64, 128>, // shape of warp-level GEMM tile
cutlass::gemm::GemmShape<8, 8, 32>, // shape of target math instruction
cutlass::epilogue::thread::LinearCombinationClamp<
int32_t, // data type of output matrix
8, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
int32_t, // Data type of accumulator
    float>,                                           // epilogue operator
SwizzleThreadBlock, // optional function to reorder threadblocks for locality
2, // number of pipeline stages in threadblock-scoped GEMM
cutlass::arch::OpMultiplyAddSaturate, // math operation on data of element a and b
cutlass::conv::IteratorAlgorithm::kOptimized // global memory iterator algorithm
>::Kernel
```
That is, this computes 2D convolutional forward propagation with 4-bit integer inputs and outputs (`cutlass::int4b_t`).
Internal accumulation is performed using 32-bit integers (`int32_t`), and an elementwise linear combination operation
is performed on the output in single-precision floating point (`float`).
The threadblock and warp-level tile shapes refer to the hierarchically blocked GEMM computation
[described here](/media/docs/gemm_api.md). Larger tiles achieve greater reuse of data loaded through shared memory
but launch fewer CTAs and may not fully occupy the GPU for small problem sizes. Smaller tile configurations achieve
lower peak utilizations but may better match the number of SMs within the GPU for real-world workloads.
## Launching the convolution
The following code collects the arguments for an implicit GEMM operation into a structure.
```c++
//
// Define arguments for CUTLASS Convolution
//
// mode (kCrossCorrelation or kConvolution)
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices);
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_c.device_ref(),
{options.alpha, options.beta},
};
```
The `mode` flag indicates whether to compute cross correlation or convolution. The arguments
`input_size`, `filter_size`, `padding`, `conv_stride`, and `dilation` specify the dimensions of the
input and output tensors and characterize the problem size.
The arguments `tensor_a.device_ref()`, `tensor_b.device_ref()`, and `tensor_c.device_ref()` are
CUTLASS `TensorRef<>` objects containing a pointer to the tensor data in GPU device memory and stride values.
The following code initializes and launches the Implicit GEMM operation on the device. After initializing
the arguments structure, it is used to query device-side workspace requirements and allocate them
in device memory if needed.
Then, the Implicit GEMM object is initialized with the `arguments` structure and the workspace in
device memory. This initialization step precomputes internal lookup tables used by the convolution kernel
and may also clear the device-side workspace if needed.
Finally, the initialized Implicit GEMM object is called, launching a kernel on the device. `tensor_c` now
contains the result of the implicit GEMM.
```c++
ImplicitGemm implicit_gemm_op;
// Query workspace size
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Initialize the Implicit GEMM object
cutlass::Status status = implicit_gemm_op.initialize(arguments, workspace.get());
if (status != cutlass::Status::kSuccess) {
/* error */
}
//
// Launch initialized CUTLASS kernel
//
status = implicit_gemm_op();
if (status != cutlass::Status::kSuccess) {
/* error */
}
```
The example demonstrates how the input and output tensors may be written to a file as CSV using
`cutlass::HostTensor<>` defined in the [CUTLASS Utilities](/media/docs/utilities.md).
```c++
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
// Copy device memory to host backing store
tensor_c.sync_host();
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
```
## CUTLASS Components
CUTLASS defines the following CUDA C++ templates to implement Implicit GEMM Convolution which are described in greater detail in subsequent sections.
**Activations tile iterators** load the activations tile into registers. Two implementations are provided:
- [conv2d_fprop_activation_tile_access_iterator_analytic.h](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h) computes pointer deltas and masks analytically
- [conv2d_fprop_activation_tile_access_iterator_optimized.h](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h) optimizes iterating over global memory and
creating GEMM-A tile in shared memory.
**Filter tile iterators** load filters into registers. Similarly, two implementations are provided:
- [conv2d_fprop_filter_tile_access_iterator_analytic.h](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h) computes pointer deltas and masks analytically
- [conv2d_fprop_filter_tile_access_iterator_optimized.h](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h) optimizes iterating over global memory and
creating GEMM-B tile in shared memory.
The improvements covered by optimized iterators are:
a. Precomputing kernel-invariant pointer deltas on the host
b. Computing CTA-invariant mask predicates in device-side iterator constructors
c. Use of [fast divmod](/include/cutlass/fast_math.h) to map GEMM dimensions to convolution tensors.
For example, an _optimized_ activation iterator uses fast divmod to map GEMM _M_ to NPQ.
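As a sketch of this mapping (the variable names are illustrative, and plain `/` and `%` are shown where the optimized iterators substitute precomputed fast divmod parameters):
```c++
// Decompose a linear GEMM M index in [0, N*P*Q) into an (n, p, q) output coordinate.
int npq = gemm_m_idx;
int n        = npq / (P * Q);
int residual = npq % (P * Q);
int p        = residual / Q;
int q        = residual % Q;
```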
**Pipelined mainloop** loads threadblock-scoped tiles from global memory into shared memory and then applies
CUTLASS warp-level GEMM operations to load from Shared Memory and issue instructions to Turing Tensor Cores.
- [implicit_gemm_pipelined.h](/include/cutlass/conv/threadblock/implicit_gemm_pipelined.h)
Operations for storing to shared memory and performing warp-wide matrix multiply operations using
Turing Tensor Cores are applied directly from the CUTLASS GEMM components. These include the
following components.
**Regular Tile Iterator** implemented in
[transform::threadblock::RegularTileIterator](/include/cutlass/transform/threadblock/regular_tile_iterator.h)
stores register-backed fragments to Shared Memory in permuted layouts.
**Warp-level GEMM** defined in [cutlass::gemm::warp::MmaTensorOp](/include/cutlass/gemm/warp/mma_tensor_op.h)
defines tile iterators to load from Shared Memory and issue math instructions to Turing Tensor Cores.
Further details are [described in here](/media/docs/gemm_api.md#warp-level-matrix-multiply-api).
**Epilogue** reorders accumulator elements among threads within a threadblock to efficiently update
the output tensor. It is implemented in [epilogue::threadblock::Epilogue](/include/cutlass/epilogue/threadblock/epilogue.h).
### Loading Activations and Filters
The Implicit GEMM Convolution algorithm partitions the GEMM _K_ dimension (of extent _CRS_) into
threadblock tiles, assigning each threadblock tile to one filter position and an interval
of channels. After iterating over all filter positions, the convolution algorithm advances to the
next interval of channels and proceeds from filter `r=0, s=0`.
The matrix product of one threadblock tile is computed per iteration of
the mainloop as described in the [CUTLASS GEMM implementation](/media/docs/efficient_gemm.md). To
summarize, the threadblock tile of activations and filters are loaded from tensors in global memory
and stored to shared memory. Each thread within the threadblock loads one or more vectors, and
together the threads collectively span the entire tile.
The following figure illustrates one particular iteration of the Implicit GEMM mainloop. Each
thread within the threadblock is mapped to several vectors of elements in the Activations and
Filters tensors. Each index in the GEMM _M_ dimension corresponds to a unique _(N,P,Q)_
index of the output tensor, and pointers may be computed based on this as well as
filter position _(r,s)_.
![ALT](/media/images/conv2d-fprop-int4.png "Convolution Forward Propagation on INT4 data.")
The CUTLASS component that embodies this functionality is [Conv2dFpropActivationTileAccessIteratorAnalytic](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h).
Its constructor computes the mapping of GEMM _M_ to _(N, P, Q)_, and its `at()` method computes the offset into the Activations
tensor for each memory access the thread is to perform. Additionally, the method `valid()` computes the validity of each access
for each filter position, indicating whether the memory access will be within the bounds of the
tensor or out of bounds.
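As a rough illustration (member names simplified; this is not the actual implementation), the predicate evaluated by `valid()` amounts to a bounds check of the mapped input coordinate against the problem size:
```c++
// Sketch: an access is valid only if the computed activation coordinate,
// after accounting for padding, lies within the input tensor.
bool is_valid =
  (n < problem_size.N) &&
  (h >= 0 && h < problem_size.H) &&
  (w >= 0 && w < problem_size.W) &&
  (c < problem_size.C);
```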
`operator++()` iterates over the memory accesses performed by a thread in both the contiguous and strided dimensions.
```c++
// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h
// Update iterator to thread's next contiguous, strided memory access
Conv2dFpropActivationTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
```
After all accesses have been visited for the current threadblock tile, `advance()` updates the pointers to the next tile.
The offsets added to each pointer follow the traversal of filter positions, performing one of the
following:
- advance from filter position _(r, s, c)_ to filter position _(r, s+1, c)_
- advance from filter position _(r, S-1, c)_ to filter position _(r+1, 0, c)_
- advance from filter position _(R-1, S-1, c)_ to filter position _(0, 0, c+32)_
The logic within the body of `advance()` computes the above three updates for the activation GEMM-A tile.
```c++
// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h
// Advance to the next access
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
```
Similar logic holds for [Conv2dFpropFilterTileAccessIteratorAnalytic](/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h).
To reduce computational overhead in the mainloop body, the pointer offsets may be precomputed
in host code and provided to the CUDA kernel as a lookup table in its `Params` structure.
As shown in [Conv2dFpropActivationTileAccessIteratorOptimized](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h),
the logic to compute offsets from the filter position has been extracted into the `Params` constructor.
```c++
// cutlass/conv/threadblock/conv2d_params.h
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNHWC> {
...
// next S
inc_next[0] = conv_sign * (int64_t(layout.stride()[0]) * problem_size.dilation_w) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
...
}
```
This leaves only a simple lookup from the _delta table_ to be performed in device code in `Conv2dFpropActivationTileAccessIteratorOptimized::advance()`.
```c++
// cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]); // in addition to Conv2dFpropActivationTileAccessIteratorAnalytic::advance()
if (next_idx == 2) {
filter_c_ += params_.filter_c_delta;
}
}
```
### Making use of Tensor Cores
Turing Tensor Cores compute matrix multiply-accumulate operations efficiently by sharing data among all
threads within a warp. The following operations are supported.
| **Shape** | **A** | **B** | **C** |
|-----------|---------|---------|---------|
| 8x8x32 | int4b_t | int4b_t | int32_t |
| 8x8x16 | int8_t | int8_t | int32_t |
| 16x8x8 | half | half | half |
| 16x8x8 | half | half | float |
Functionally, the Turing 8x8x32 matrix multiply operation distributes the _A_, _B_, and _C_ matrix across 32
threads within a warp according to the following illustration.
![ALT](/media/images/mma-8x8x32.png "Turing Tensor Op")
This Tensor Core operation is accessible to the CUDA programmer via the PTX instruction
[`mma.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-8832).
CUTLASS wraps inline PTX with device-side intrinsics defined in [`cutlass/arch/mma_sm75.h`](/include/cutlass/arch/mma_sm75.h)
as in the following example.
```c++
unsigned A; // eight packed 4-bit integer elements
unsigned B; // eight packed 4-bit integer elements
int C[2]; // two 32-bit integer elements
int D[2]; // two 32-bit integer elements
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
```
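The same operation is also reachable through CUTLASS's architecture-level `Mma` template; the instantiation below is a sketch whose template arguments are assumed to correspond to the INT4 Turing specialization in `mma_sm75.h`.
```c++
using MmaTensorOp = cutlass::arch::Mma<
  cutlass::gemm::GemmShape<8, 8, 32>,              // instruction shape (M-by-N-by-K)
  32,                                              // participating threads (one warp)
  cutlass::int4b_t, cutlass::layout::RowMajor,     // element and layout of operand A
  cutlass::int4b_t, cutlass::layout::ColumnMajor,  // element and layout of operand B
  int32_t, cutlass::layout::RowMajor,              // element and layout of the accumulators
  cutlass::arch::OpMultiplyAddSaturate             // saturating integer multiply-add
>;
```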
To load data efficiently from Shared Memory into registers with the distribution among
warps matching the above, the Turing GPU architecture introduces
[`ldmatrix`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix).
`ldmatrix` is a fully warp-cooperative instruction: all threads contribute addresses for up to 32 row vectors,
each 128 bits in length. These rows are fetched from Shared Memory and then distributed among groups of four threads
per row.
The arrangement of SMEM pointers and destination registers within threads is illustrated as follows. Thread 0 is highlighted
in the illustration to emphasize the mapping.
![ALT](/media/images/ldmatrix-8x128bx4.png "Turing ldmatrix PTX instruction")
The size of the Turing Tensor Core operation computing matrix multiply-accumulate on INT4 data is 8-by-8-by-32
elements. `ldmatrix` fetches up to 32 rows (or columns) per operation. Sixteen Tensor Core operations may be issued
to implement a 32-by-32-by-32 matrix product and perfectly consume all data loaded by two `ldmatrix` instructions
as shown in the following figure. Larger tiles are possible by increasing the number of memory instructions
and issuing more Tensor Core operations, up to warp-level matrix operations of size 64-by-64-by-32. The limit is
the number of registers to hold the accumulator elements.
![ALT](/media/images/ldmatrix-tensorop-32x32x32.png "Turing ldmatrix PTX instruction feeding Tensor Core operations")
### Shared Memory Layouts
In the previous two sections, we have described how data may be loaded from activations and filters tensors
in global memory to compute convolution, and we have described a composition of `ldmatrix` and `mma.sync`
to fetch data from Shared Memory and issue Tensor Core operations.
To ensure this data movement is efficient, care must be taken to ensure bank conflicts are avoided. CUTLASS
uses a permuted Shared Memory layout to avoid bank conflicts when storing to Shared Memory and to efficiently
load from Shared Memory using `ldmatrix`. The following figure illustrates the thread mapping used for
loading the activations and filters threadblock tiles from global memory, and the permuted layout in
Shared Memory.
![ALT](/media/images/tensor-op-permuted-smem-layout-TN.png "Shared Memory layout used for Turing Tensor Cores")
In the illustration, one warp-wide memory access is highlighted in blue, with individual threads
loading one 128-bit vector. The tile in global memory could correspond either to the activations
or filters and is assumed to be 'strip-mined' with four threads loading consecutive channels.
Shared Memory is visualized as a 'row-major' matrix with eight columns representing
the eight 128-bit banks.
As described in the CUTLASS GTC 2019 presentation [slides](https://developer.download.nvidia.com/video/gputechconf/gtc/2019/presentation/s9593-cutensor-high-performance-tensor-operations-in-cuda-v2.pdf),
[recording](https://developer.nvidia.com/gtc/2019/video/S9593), an access to Shared Memory will be conflict-free if
the following conditions are satisfied across each warp:
- {T0, T1, .., T7} do not access the same 128-bit bank
- {T8, T9, .., T15} do not access the same 128-bit bank
- {T16, T17, .., T23} do not access the same 128-bit bank
- {T24, T25, .., T31} do not access the same 128-bit bank
To achieve conflict-free stores, the Shared Memory layout remaps the strip-mined arrangement to transpose
the vectors and applies an XOR operation on the column index of each thread's pointer. Specifically,
```c++
int store_column = (lane_id % 8) ^ (lane_id / 8);
```
This transformation on the layout will be instrumental in reading slices of data from Shared Memory
to compute the warp-level matrix multiply using Tensor Cores.
The following figure shows how the first sixteen threads participating in an `ldmatrix` instruction
logically map to the c=0..31 slice of a matrix in Shared Memory. This slice is known as a "k-group"
within the code because it corresponds to the same K-index of a warp-level matrix multiply.
![ALT](/media/images/tensor-op-permuted-smem-layout-TN-k0.png "Load kgroup=0 from Shared Memory using ldmatrix")
The lower half of the figure shows the physical arrangement in Shared Memory, with threads offset by row and column
according to the XOR function. By inspection, we can observe there are no bank conflicts, as _T0 ... T7_ each access unique
banks, as do _T8 ... T15_, and so on.
To advance to the next "k-group" within Shared Memory, pointers are updated using an XOR operation according to
the following sequence:
- **^1** advances from _k=0_ to _k=1_
- **^3** advances from _k=1_ to _k=2_
- **^1** advances from _k=2_ to _k=3_
- **^3** advances from _k=3_ to _k=0_
The first of these transitions is shown below.
![ALT](/media/images/tensor-op-permuted-smem-layout-TN-k1.png "Advance to kgroup=1 from Shared Memory using ldmatrix")
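A minimal sketch of this pointer update (the variable names are illustrative rather than the actual member names):
```c++
// Advance the permuted Shared Memory column index to the next k-group.
// The XOR mask alternates 1, 3, 1, 3 across the transitions k=0->1, 1->2, 2->3, 3->0.
smem_column ^= (k_group & 1) ? 3 : 1;
k_group = (k_group + 1) & 3;
```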
The [CUTLASS warp-level GEMM API](/media/docs/gemm_api.md#warp-level-matrix-multiply-api) defines templates for
loading slices of data from permuted Shared Memory and issuing operations to Tensor Cores.
### Updating the Output Tensor
After the mainloop terminates, the accumulator tile of the warp-level GEMM stores a warp's contribution to the output
tensor. However, the distribution of data among threads within the threadblock is specialized for efficient matrix multiply-accumulate
operations using Tensor Cores and is not conducive to efficient, coalesced operations to Global Memory. A data rearrangement is
needed.
The **Epilogue** is the component for exchanging accumulator elements through Shared Memory, loading slices of the output
matrix or tensor, applying an elementwise operation such as linear scaling or bias, and storing the result to the output tensor.
CUTLASS structures this as several components:
- [cutlass::epilogue::threadblock::Epilogue](/include/cutlass/epilogue/threadblock/epilogue.h) - the top-level component for looping over the entire threadblock tile
- [cutlass::epilogue::warp::TileIteratorTensorOp](/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h) - a specialized component for storing accumulators for Tensor Core to Shared Memory
- [cutlass::epilogue::threadblock::SharedLoadIterator](/include/cutlass/epilogue/threadblock/shared_load_iterator.h) - a component for loading elements from a row-major arrangement in Shared Memory
- [cutlass::epilogue::threadblock::PredicatedTileIterator](/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h) - a component for loading or storing matrix fragments to Global Memory (with bounds checks)
- [cutlass::epilogue::thread::LinearCombination](/include/cutlass/epilogue/thread/linear_combination.h) - an element-wise function computing `alpha * AB + beta * C` to compute the final output
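Conceptually, the elementwise stage reduces to the following per-element computation (a sketch only; the clamp bounds and variable names are assumptions, and the functor used by the kernel defined above is `LinearCombinationClamp`):
```c++
// Compute alpha * AB + beta * C in the compute type, then clamp and convert
// to the output element type.
float intermediate = alpha * float(accumulator) + beta * float(source);
intermediate = fmaxf(kOutputMin, fminf(kOutputMax, intermediate));  // kOutputMin/kOutputMax: assumed clamp bounds
int32_t output = static_cast<int32_t>(intermediate);
```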
## Unit Tests
Unit tests verify the functional behavior of each of the above components in a standalone CUDA kernel. This provides a
convenient environment to
a. inspect the template definition,
b. showcase instantiation and use of these templates in device code, and
c. assert functional correctness.
**Convolution unit tests**
- Device-wide convolution operator: [conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu)
**GEMM unit tests**
- Warp-scoped matrix multiply for Turing Tensor Cores: [gemm_sm75.cu](/test/unit/gemm/warp/gemm_sm75.cu)
**Epilogue unit tests**
- Epilogue for Turing Tensor Cores: [epilogue_tensor_op.cu](/test/unit/epilogue/threadblock/epilogue_tensor_op.cu)
# Convolution Example
This section describes the provided convolution example and is intended to orient the reader to the CUTLASS implementation
of Implicit GEMM Convolution.
## Building and Running the Example
Example `09_turing_tensorop_conv2dfprop` computes a forward convolutional layer in which inputs and
outputs are 4-b integers. The example source is visible in
[examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu](/examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu).
Before building the example, first perform the prerequisite steps for building any CUTLASS component [described here](/media/docs/quickstart.md).
Compute capability 7.5 refers to the Turing architecture, and this work requires CUDA 10.2 Toolkit or later to target
Turing Tensor Cores using the native `mma` [PTX instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-8832).
```bash
$ mkdir build && cd build
$ cmake .. -DCUTLASS_NVCC_ARCHS=75
```
To build the example, execute `make 09_turing_tensorop_conv2dfprop` from the build directory.
```bash
$ make 09_turing_tensorop_conv2dfprop
$ ls examples/09_turing_tensorop_conv2dfprop
examples/09_turing_tensorop_conv2dfprop
```
This example provides a simple command line interface to specify the extents of 4D tensors of 4-bit integer elements (`cutlass::int4b_t`),
initialize them to random values, and compute the result of a convolutional layer. Optionally, the input and output
tensors may be saved to .csv files, and the CUTLASS host-side reference check may be executed to verify correctness.
The complete usage statement is visible by running with `--help`:
```bash
$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --help
09_turing_tensorop_conv2dfprop example
This example uses Turing's Tensor Core operators on int4 data types to compute
forward convolution on tensors of layout NHWC.
Options:
--help If specified, displays this usage statement.
--n <int> Input tensor extent N
--h <int> Input tensor extent H
--w <int> Input tensor extent W
--c <int> Input tensor extent C
--k <int> Filter extent K
--r <int> Filter extent R
--s <int> Filter extent S
--alpha <float> Epilogue scalar alpha
--beta <float> Epilogue scalar beta
--ref-check If set (true), reference check on the host is computed
--perf-check If set (true), performance is measured.
--benchmark If set (true), performance benchmarking on several layers and batch-size.
--iterations <int> Number of profiling iterations to perform.
--save-workspace If set, workspace is written to a text file.
--tag <string> String to replicate across the first column in the results table
Examples:
$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1
$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check
```
*Note:* this example assumes all tensors are 128-bit aligned and in format _NHWC_. Consequently, dimension
_C_ must be divisible by 32 for activations, filters, and output.
If the option `--benchmark` is passed, several layers from ResNet50 are profiled for various batch sizes.
The following command was run on an NVIDIA RTX 2080 with the example compiled using CUDA 10.2.
```bash
build$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --benchmark
```
Convolution can also be run by the CUTLASS Profiler.
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/implicit_gemm_convolution.md/0 | {"file_path": "media/docs/implicit_gemm_convolution.md", "repo_id": "media", "token_count": 12488} | 50 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
import json
import os
import sqlite3
import subprocess
import tempfile
from cuda import cuda, nvrtc
from cutlass_library import SubstituteTemplate
import cutlass
from cutlass import CACHE_FILE, CUTLASS_PATH, cuda_install_path, logger
from cutlass.backend.gemm_operation import GemmOperationUniversal
from cutlass.backend.library import ApiVersion
from cutlass.backend.utils.device import device_cc
IncludeTemplate = r"""#include "${include}"
"""
def compile_with_nvcc(cmd, source, error_file):
succeed = True
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_message = e.output.decode()
with open(error_file, "w") as error_out:
error_log = "Compilation error for the following kernel: \n"
error_log += source
error_log += "\nError Message:\n"
error_log += error_message
error_out.write(error_log)
succeed = False
if not succeed:
# Print the error log to stdout if log level is set to warning or higher
# verbosity. Otherwise, simply point to the error log file.
logger.warning(error_log)
raise Exception(f"Invalid Kernel. See '{error_file}' for details.")
class CompilationOptions:
"""
    Compilation options (compiler flags, target architecture, and include paths) passed to NVRTC or NVCC.
"""
def __init__(self, flags, arch, include_paths=[]):
self.includes = []
self.include_paths = include_paths
self.flags = flags
self.arch = arch
def get_str(self):
opts = []
for flag in self.flags:
opts.append(flag)
for incl in self.include_paths:
opts.append(f"--include-path={incl}")
arch_flag = f"-arch=sm_{self.arch}"
if self.arch == 90:
arch_flag += "a"
opts.append(arch_flag)
return " ".join(opts)
def get(self):
options = []
for flag in self.flags:
options.append(bytes(str.encode(flag)))
for incl in self.include_paths:
options.append(bytes(str.encode(f" --include-path={incl}")))
arch_flag = f" -arch=sm_{self.arch}"
if self.arch == 90:
arch_flag += "a"
options.append(bytes(str.encode(arch_flag)))
return options
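# Illustrative usage of CompilationOptions (the flags, architecture, and include path below
# are assumptions for the sake of example, not values taken from this module):
#   opts = CompilationOptions(["-std=c++17"], arch=80, include_paths=["/usr/local/cuda/include"])
#   opts.get_str()  # -> "-std=c++17 --include-path=/usr/local/cuda/include -arch=sm_80"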
def convertToBinaryData(filename):
with open(filename, "rb") as file:
blobData = file.read()
return blobData
def CDLLBin(host_binary):
tempfile.tempdir = "./"
temp_so = tempfile.NamedTemporaryFile(prefix="host_func", suffix=".so", delete=True)
with open(temp_so.name, "wb") as file:
file.write(host_binary)
host_lib = ctypes.CDLL(temp_so.name)
return host_lib
class ArtifactManager:
"""
    Artifact manager: compiles kernels via NVRTC or NVCC and caches the resulting cubins and host libraries both in memory and in a SQLite database.
"""
def __init__(self) -> None:
connection = sqlite3.connect(CACHE_FILE)
cursor = connection.cursor()
# Create the table if it does not already exist
sqlite_create_table_query = """
CREATE TABLE IF NOT EXISTS compiled_operations(op_key TEXT NOT NULL UNIQUE,
cubin BLOB NOT NULL,
hostbin BLOB NOT NULL,
op_name TEXT NOT NULL,
op_attrs TEXT NOT NULL)
"""
cursor.execute(sqlite_create_table_query)
connection.commit()
cursor.close()
self._nvrtc_compile_options = ["-std=c++17", "-default-device"]
self._nvcc_compile_options = [
"-std=c++17",
"--expt-relaxed-constexpr",
"-Xcudafe --diag_suppress=esa_on_defaulted_function_ignored",
]
self.nvcc()
self.compiled_cache_device = {}
self.compiled_cache_host = {}
def nvrtc(self):
self.backend = "nvrtc"
self.default_compile_options = self._nvrtc_compile_options
def nvcc(self):
self.backend = "nvcc"
self.default_compile_options = self._nvcc_compile_options
def insert_operation(self, op_key, cubin, hostfile, op_name, op_attrs):
connection = sqlite3.connect(CACHE_FILE)
cursor = connection.cursor()
sqlite_insert_blob_query = """ INSERT OR IGNORE INTO compiled_operations (op_key, cubin, hostbin, op_name, op_attrs) VALUES (?, ?, ?, ?, ?)"""
hostbin = convertToBinaryData(hostfile)
data_tuple = (op_key, cubin, hostbin, op_name, json.dumps(op_attrs))
cursor.execute(sqlite_insert_blob_query, data_tuple)
connection.commit()
cursor.close()
def load_operation(self, op_key, extra_funcs):
connection = sqlite3.connect(CACHE_FILE)
cursor = connection.cursor()
sqlite_fetch_blob_query = """SELECT * from compiled_operations where op_key = ?"""
cursor.execute(sqlite_fetch_blob_query, (op_key,))
record = cursor.fetchall()
if len(record) == 0:
return False
for row in record:
key, cubin_image, host_binary, operation_name, op_attr = row
op_attr = json.loads(op_attr)
err, module = cuda.cuModuleLoadData(cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("Cuda Error: {}".format(err))
err, kernel = cuda.cuModuleGetFunction(module, bytes(str.encode(operation_name)))
self.compiled_cache_device[key] = kernel
compiled_host_fns = {}
host_lib = CDLLBin(host_binary)
func_name = operation_name + "_get_params"
func = getattr(host_lib, func_name)
func.restype = ctypes.POINTER(ctypes.c_char * op_attr[0])
compiled_host_fns["get_args"] = func
func_name = operation_name + "_shared_memory_size"
func = getattr(host_lib, func_name)
compiled_host_fns["shared_memory_capacity"] = func()
for attr in op_attr:
if isinstance(attr, str):
func_name = operation_name + "_" + attr
func = getattr(host_lib, func_name)
# Set the return type of the function
if attr in extra_funcs and extra_funcs[attr] != None:
func.restype = extra_funcs[attr]
compiled_host_fns[attr] = func
self.compiled_cache_host[key] = compiled_host_fns
return True
def emit_compile_(self, operation_list, compilation_options, host_compilation_options):
"""
Compile a list of kernels and store them into database
"""
source_buffer_device = ""
source_buffer_host = ""
# 1. include
includes = []
for operation in operation_list:
for incl in operation.emitter.includes:
if incl not in includes:
includes.append(incl)
includes_host = ["builtin_types.h", "device_launch_parameters.h", "stddef.h"] + includes
for incl in includes:
source_buffer_device += SubstituteTemplate(
IncludeTemplate,
{"include": incl},
)
for incl in includes_host:
source_buffer_host += SubstituteTemplate(
IncludeTemplate,
{"include": incl},
)
# 2. Operations
for operation in operation_list:
source_buffer_device += operation.emit()
source_buffer_host += operation.emit()
values = {
"operation_name": operation.name(),
"operation_suffix": operation.emitter.operation_suffix,
}
source_buffer_device += SubstituteTemplate(
operation.KernelTemplate,
values,
)
source_buffer_host += SubstituteTemplate(operation.HostTemplate, values)
if self.backend == "nvrtc":
# 3. compile
err, program = nvrtc.nvrtcCreateProgram(
str.encode(source_buffer_device),
bytes(str.encode("module.cu")),
0, [], [])
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
# Compile program
options = compilation_options.get()
err, = nvrtc.nvrtcCompileProgram(program, len(options), options)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
error_string = "NVRTC Error: {}\n".format(err)
# Get log from compilation
err, logSize = nvrtc.nvrtcGetProgramLogSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
log = b" " * logSize
err, = nvrtc.nvrtcGetProgramLog(program, log)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
raise RuntimeError(error_string + log.decode() + source_buffer_device)
# Get data from compilation
err, dataSize = nvrtc.nvrtcGetCUBINSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
cubin_image = b" " * dataSize
(err,) = nvrtc.nvrtcGetCUBIN(program, cubin_image)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
else: # with nvcc backend
# emit code
tempfile.tempdir = "./"
temp_cu = tempfile.NamedTemporaryFile(
prefix="kernel", suffix=".cu", delete=True)
temp_cubin = tempfile.NamedTemporaryFile(
prefix="kernel", suffix=".cubin", delete=True)
with open(temp_cu.name, "w") as file:
file.write(source_buffer_device)
# compile with nvcc
cmd_template = "${cuda_install_path}/bin/nvcc ${options} -cubin ${srcfile} -o ${tarfile}"
values = {
"cuda_install_path": cuda_install_path(),
"options": compilation_options.get_str(),
"srcfile": temp_cu.name,
"tarfile": temp_cubin.name,
}
cmd = SubstituteTemplate(cmd_template, values)
compile_with_nvcc(cmd.split(" "), source_buffer_device, "./cutlass_python_compilation_device_error.txt")
# load the cubin image
with open(temp_cubin.name, "rb") as file:
cubin_image = file.read()
tempfile.tempdir = "./"
temp_src = tempfile.NamedTemporaryFile(
prefix="host_src", suffix=".cu", delete=True)
# Write the host source
with open(temp_src.name, "w") as outfile:
outfile.write(source_buffer_host)
temp_dst = tempfile.NamedTemporaryFile(
prefix="host_func", suffix=".so", delete=True)
# Set up host compilation arguments
cmd = []
cmd.append(f"{cuda_install_path()}/bin/nvcc")
cmd.extend(["-x", "cu", "-Xcompiler=-fpermissive", "-Xcompiler=-w", "-Xcompiler=-fPIC"])
cmd.extend(host_compilation_options.get_str().split(" "))
cmd.extend(["-shared", "-o", temp_dst.name, temp_src.name, "-lcudart", "-lcuda"])
        # Compile the host source and load the resulting shared library
        compile_with_nvcc(cmd, source_buffer_host, error_file="./cutlass_python_compilation_host_error.txt")
host_lib = ctypes.CDLL(temp_dst.name)
return cubin_image, host_lib, temp_dst
def add_module(self, operations, compile_options=None, bypass_cache=False):
"""
Insert a new compiled device module
"""
include_paths = [
cuda_install_path() + "/include",
CUTLASS_PATH + "/include",
CUTLASS_PATH + "/tools/util/include",
CUTLASS_PATH + "/python/cutlass/cpp/include",
]
cutlass.initialize_cuda_context()
arch = device_cc()
host_compile_options = CompilationOptions(
self._nvcc_compile_options, arch, include_paths)
if compile_options is None:
compile_options = CompilationOptions(
self.default_compile_options, arch, include_paths)
# save the cubin
operation_key = []
operation_list = []
for operation in operations:
# step 1: get kernel string as key
key = operation.rt_module.emit() + operation.procedural_name() + self.backend
            # step 2: check if the operation is already in the compilation cache
compiled_kernel = self.compiled_cache_device.get(key)
if compiled_kernel is None and not bypass_cache:
hit = self.load_operation(key, getattr( operation.rt_module, "extra_funcs", {}))
if hit:
compiled_kernel = self.compiled_cache_device.get(key)
assert compiled_kernel is not None
if compiled_kernel is not None:
operation.rt_module.kernel = compiled_kernel
compiled_host_fns = self.compiled_cache_host.get(key)
assert compiled_host_fns is not None
for key in compiled_host_fns.keys():
setattr(operation.rt_module, key, compiled_host_fns[key])
operation.rt_module.initialize()
else:
operation_list.append(operation.rt_module)
operation_key.append(key)
if len(operation_list) > 0:
cubin_image, host_lib, host_file = self.emit_compile_(
operation_list, compile_options, host_compile_options)
err, module = cuda.cuModuleLoadData(cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("Cuda Error: {}".format(err))
operation_name = []
operation_attr = []
for operation, key in zip(operation_list, operation_key):
# get device kernels
err, operation.kernel = cuda.cuModuleGetFunction(
module,
bytes(str.encode(operation.name()))
)
operation_name.append(operation.name())
self.compiled_cache_device[key] = operation.kernel
# get host functions
compiled_host_fns = {}
op_attr = []
# get param size
func_name = operation.name() + "_get_param_size"
func = getattr(host_lib, func_name)
param_size = func()
func_name = operation.name() + "_get_params"
func = getattr(host_lib, func_name)
func.argtype = operation.argtype
func.restype = ctypes.POINTER(ctypes.c_char * param_size)
setattr(operation, "get_args", func)
compiled_host_fns["get_args"] = func
# set shared memory size
func_name = operation.name() + "_shared_memory_size"
func = getattr(host_lib, func_name)
setattr(operation, "shared_memory_capacity", func())
compiled_host_fns["shared_memory_capacity"] = func()
# set the maximum dynamic shared size
operation.initialize()
# get extra functions
op_attr.append(param_size)
if hasattr(operation, "extra_funcs"):
for suffix, ret_type in operation.extra_funcs.items():
func_name = operation.name() + "_" + suffix
func = getattr(host_lib, func_name)
if ret_type is not None:
func.restype = ret_type
setattr(operation, suffix, func)
compiled_host_fns[suffix] = func
op_attr.append(suffix)
operation_attr.append(op_attr)
self.compiled_cache_host[key] = compiled_host_fns
for (key, operation_name, operation_attr,) in zip(operation_key, operation_name, operation_attr):
self.insert_operation(
key, cubin_image, host_file.name, operation_name, operation_attr)
| python/cutlass/backend/compiler.py/0 | {"file_path": "python/cutlass/backend/compiler.py", "repo_id": "python", "token_count": 8423} | 51 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
DAG IR used by Python EVT
"""
import networkx as nx
from cutlass_library import DataType
from cutlass.backend.evt.ir.node import NodeBase
from cutlass.backend.utils import device_cc
class DAGIR:
"""
``DAGIR`` is the main data structure used in the EVT Intermediate Representation.
    It consists of a series of nodes, each representing an epilogue visitor node.
    In the DAG IR, a ``node`` is referenced by its name (a string), while ``node_meta`` is the underlying node object.
"""
def __init__(self, element_compute=DataType.f32, cc: int=None) -> None:
        # The EVT DAG IR is managed through the networkx DiGraph class
self._graph = nx.DiGraph()
self.element_compute = element_compute
self.reduction_names = []
self.cc = cc if cc else device_cc()
#
# IR manipulator
#
def add_node(self, meta: NodeBase):
"""
Add a node to dag ir
"""
if self.has_node(meta.name):
raise SyntaxError(f"Variable '{meta.name}' cannot be defined twice.")
self._graph.add_node(meta.name, meta=meta)
def add_edge(self, src: str, dst: str, weight: int=0):
"""
Add an edge src -> dst to dag ir with weight
"""
if not self.has_node(src):
raise SyntaxError(f"Variable '{src}' is undefined.")
if not self.has_node(dst):
raise SyntaxError(f"Variable '{dst}' is undefined.")
self._graph.add_edge(src, dst, weight=weight)
def remove_node(self, node: str):
"""
Remove node from dag ir
"""
self._graph.remove_node(node)
def remove_edge(self, src: str, dst: str):
"""
Remove edge src -> dst
"""
self._graph.remove_edge(src, dst)
#
# Helper functions for getting attrs
#
def has_node(self, node: str) -> bool:
"""
Check if the node is in the graph
"""
return self._graph.has_node(node)
def in_degree(self, node: str):
"""
Get the input degree of node
"""
return self._graph.in_degree(node)
def in_edges(self, node: str):
"""
Get the input edges of node
"""
return [edge for edge in self._graph.in_edges(node)]
def out_degree(self, node: str):
"""
Get the output degree of node
"""
return self._graph.out_degree(node)
def out_edges(self, node: str):
"""
Get the output edges of node
"""
return [edge for edge in self._graph.out_edges(node)]
def get_node_meta(self, node: str):
"""
Get the meta data of the node
"""
return self._graph.nodes[node]["meta"]
def get_edge_weight(self, src, dst):
"""
Get the edge weight of edge src->dst
"""
return self._graph.get_edge_data(src, dst)["weight"]
#
# High-level helper functions
#
def all_reachable_nodes(self, node: str):
"""
        Get all the nodes reachable from the current node (the traversal starts at, and includes, the node itself)
"""
return list(nx.dfs_preorder_nodes(self._graph, source=node))
def get_users(self, node: str):
"""
Get all users of the current node
"""
return [edge[1] for edge in self.out_edges(node)]
def get_all_inputs(self, node: str):
"""
Get all the input nodes sorted by edge weight
"""
in_edges = self.in_edges(node)
edge_weights = [self.get_edge_weight(*edge) for edge in in_edges]
return [edge[0] for _, edge in sorted(zip(edge_weights, in_edges))]
def get_all_inputs_meta(self, node: str):
"""
Get all the input node metas sorted by edge weight
"""
return [self.get_node_meta(input_node) for input_node in self.get_all_inputs(node)]
def replace_all_uses_with(self, node1, node2):
"""
Replace all uses of node1 with node2
"""
for edge in self.out_edges(node1):
weight = self.get_edge_weight(*edge)
user = edge[1]
self.add_edge(node2, user, weight)
self.remove_edge(node1, user)
self.remove_node(node1)
#
# Node accessor
#
def nodes_topological_order(self):
"""
Get the nodes in the unique lexicographical topological order
It generates a unique ordering of nodes by first sorting topologically
and then additionally by sorting lexicographically.
Although topological_sort alone also works, this generates a unique key
for each epilogue visitor pattern and ensures the compilation cache can be reused.
:return: list[str]
"""
return list(nx.lexicographical_topological_sort(self._graph))
def node_metas_topological_order(self):
"""
Get the node metas in topological order
:return: list[NodeBase]
"""
return [self.get_node_meta(node) for node in self.nodes_topological_order()]
@property
def nodes(self):
"""
Get all nodes
:return: list[str]
"""
return list(self._graph.nodes)
@property
def nodes_meta(self):
"""
Get all node metas
:return: list[NodeBase]
"""
return [data[1]['meta'] for data in self._graph.nodes.data()]
@property
def edges(self):
"""
Get all edges
:return: list[(str, str)]
"""
return list(self._graph.edges)
#
# Path
#
def has_path(self, src: str, target: str) -> bool:
"""
        Return True if a path exists from src to target
"""
return nx.has_path(self._graph, src, target)
| python/cutlass/backend/evt/ir/dag_ir.py/0 | {"file_path": "python/cutlass/backend/evt/ir/dag_ir.py", "repo_id": "python", "token_count": 2967} | 52 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Preprocess the reduction nodes.
The parser treats a reduction as Compute(op=(reg_reduce_fn, gmem_reduce_fn)) -> Store().
This pass fuses these into a single store node and then replaces all uses of the
current node with the new store node.
"""
from cutlass.backend.evt.ir import ComputeNode, StoreNode
from cutlass.backend.evt.passes.pass_manager import EVTPassBase
class PassPreprocessRed(EVTPassBase):
"""
Preprocess red nodes
"""
def call(self):
# Step 1: find the compute nodes with op=red
red_compute_nodes = []
for node_meta in self.dag_ir.nodes_meta:
if isinstance(node_meta, ComputeNode):
if type(node_meta.fn) == tuple:
# To keep the frontend simple, the reduction nodes
# are parsed into compute nodes by default
# The simple heuristic to distinguish between compute
# and reduction node is that compute node is a single function,
# while the reduction node is a tuple of functions for
# in-register reduction and atomic global memory reduction
red_compute_nodes.append(node_meta.name)
# Step 2: for each compute, merge it with the succeeding store
for node in red_compute_nodes:
# Verify
users = self.dag_ir.get_users(node)
inputs = self.dag_ir.get_all_inputs(node)
# Has a single user
assert len(users) == 1
assert len(inputs) == 1
user = users[0]
input = inputs[0]
user_meta = self.dag_ir.get_node_meta(user)
# Must be a store node
assert isinstance(user_meta, StoreNode)
# With output degree == 0
assert self.dag_ir.out_degree(user) == 0
# Register the reduce op
node_meta = self.dag_ir.get_node_meta(node)
user_meta.reg_reduce_fn, user_meta.gmem_reduce_fn = node_meta.fn
user_meta.element_compute = node_meta.element_compute
user_meta.round_style = node_meta.round_style
# Replace all uses
self.dag_ir.remove_edge(input, node)
input_users = self.dag_ir.get_users(input)
for iu in input_users:
weight = self.dag_ir.get_edge_weight(input, iu)
self.dag_ir.add_edge(user, iu, weight)
self.dag_ir.remove_edge(input, iu)
self.dag_ir.add_edge(input, user)
self.dag_ir.remove_node(node)
# Register the reduction name
self.dag_ir.reduction_names.append(user)
| python/cutlass/backend/evt/passes/pass_preprocess_red.py/0 | {"file_path": "python/cutlass/backend/evt/passes/pass_preprocess_red.py", "repo_id": "python", "token_count": 1697} | 53 |