text | id | metadata | __index_level_0__
---|---|---|---|
stringlengths 27 to 947k | stringlengths 10 to 118 | dict | int64 0 to 80
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/layout/tensor.h"
#include "cutlass/arch/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/dispatch_policy.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/gemm/collective/builders/sm90_common.inl"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::collective::detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Maps a rank-1 cute::Shape<> representing the cluster shape onto the IM2COL TMA atom that should be used with it
template <class UnimodalClusterShape>
constexpr auto
sm90_cluster_shape_to_im2col_tma_atom(UnimodalClusterShape unimodal_cluster_shape) {
static_assert(cute::rank(unimodal_cluster_shape) == 1,
"Use this function to figure out TMA for each mode individually.");
if constexpr (cute::size(unimodal_cluster_shape) == 1) {
return cute::SM90_TMA_LOAD_IM2COL{};
}
else {
return cute::SM90_TMA_LOAD_IM2COL_MULTICAST{};
}
}
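// Illustrative compile-time checks (added; not part of the original header): a cluster
// mode of size 1 selects the plain IM2COL atom, while a larger mode selects the
// multicast variant.
static_assert(cute::is_same_v<
    decltype(sm90_cluster_shape_to_im2col_tma_atom(cute::Int<1>{})),
    cute::SM90_TMA_LOAD_IM2COL>, "size-1 cluster mode -> non-multicast IM2COL TMA atom");
static_assert(cute::is_same_v<
    decltype(sm90_cluster_shape_to_im2col_tma_atom(cute::Int<2>{})),
    cute::SM90_TMA_LOAD_IM2COL_MULTICAST>, "multi-CTA cluster mode -> multicast IM2COL TMA atom");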
// Collective tile traits struct that serves as a type list containing a tensor's mem layouts and copy atoms for the collective mainloop
template<
class GmemTiledCopy_,
class SmemLayout_,
class SmemCopyAtom_ = void
>
struct Sm90ImplicitGemmTileTraits {
using GmemTiledCopy = GmemTiledCopy_;
using SmemLayout = SmemLayout_;
using SmemCopyAtom = SmemCopyAtom_;
};
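// Illustrative sketch (added; not part of the original header). The traits struct is a
// plain type list, so any copy / layout combination can be packed into it; the alias
// below is hypothetical and only demonstrates the defaulted SmemCopyAtom:
//
//   using ExampleTileTraitsA = Sm90ImplicitGemmTileTraits<
//       cute::SM90_TMA_LOAD_IM2COL,                                        // GmemTiledCopy
//       decltype(cute::make_layout(cute::Shape<cute::_128, cute::_64>{}))  // SmemLayout
//   >;
//   static_assert(cute::is_same_v<ExampleTileTraitsA::SmemCopyAtom, void>, "defaults to void");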
// Accepts a pair of identical cutlass::layout tensor tags (TensorNWC/TensorNHWC/TensorNDHWC) and computes the corresponding spatial dimension count
template <class GmemLayoutTagA, class GmemLayoutTagB>
constexpr int
gmem_layout_tags_to_spatial_dims() {
static_assert(cute::is_same_v<GmemLayoutTagA, GmemLayoutTagB>);
if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNWC>) {
return 1;
}
else if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNHWC>) {
return 2;
}
else if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNDHWC>) {
return 3;
}
else {
static_assert(cutlass::detail::dependent_false<GmemLayoutTagA>);
}
}
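// Illustrative compile-time checks (added; not part of the original header):
static_assert(gmem_layout_tags_to_spatial_dims<cutlass::layout::TensorNWC,   cutlass::layout::TensorNWC>()   == 1, "1-D convolution");
static_assert(gmem_layout_tags_to_spatial_dims<cutlass::layout::TensorNHWC,  cutlass::layout::TensorNHWC>()  == 2, "2-D convolution");
static_assert(gmem_layout_tags_to_spatial_dims<cutlass::layout::TensorNDHWC, cutlass::layout::TensorNDHWC>() == 3, "3-D convolution");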
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::collective::detail
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/collective/builders/sm90_common.inl/0 | {
"file_path": "include/cutlass/conv/collective/builders/sm90_common.inl",
"repo_id": "include",
"token_count": 1192
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
#include "cutlass/conv/threadblock/implicit_gemm_pipelined.h"
#include "cutlass/conv/threadblock/implicit_gemm_multistage.h"
#include "cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h"
#include "cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_fusion.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogue {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogue<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
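// Illustrative usage sketch (added; not part of the original header). A kernel-level
// builder typically pulls its epilogue out of this dispatcher; the type names
// ThreadblockShape, WarpMmaTensorOp, and EpilogueOutputOp below are hypothetical
// placeholders:
//
//   using Epilogue = typename detail::DefaultConvEpilogue<
//       arch::Sm80,            // any arch other than Sm70 takes the primary template
//       ThreadblockShape,
//       WarpMmaTensorOp,
//       1,                     // PartitionsK
//       EpilogueOutputOp
//   >::Epilogue;
//
// Substituting arch::Sm70 selects the partial specialization above, which routes to
// DefaultEpilogueVoltaTensorOp instead of DefaultEpilogueTensorOp.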
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename Shape,
typename WarpMmaSimt,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastSimt {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastSimt<
Shape,
WarpMmaSimt,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename ArchTag,
typename Shape,
typename WarpMmaSimt,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastSimtStridedDgrad {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastSimtStridedDgrad<
Shape,
WarpMmaSimt,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastTensorOp {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastTensorOp<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithReductionTensorOp {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithReductionTensorOp<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Defaults for strided Dgrad
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogueStridedDgrad {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogueStridedDgrad<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOpStridedDgrad<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d.h",
"repo_id": "include",
"token_count": 2756
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kStrided
>
class Conv3dDgradOutputGradientTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv3dDgradOutputGradientTileAccessIteratorAnalytic strided dgrad needs special handling using
// unscaled coordinates
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dDgradOutputGradientTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
ConvProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_k_;
int filter_t_;
int filter_r_;
int filter_s_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_d_[ThreadMap::Iterations::kStrided];
int offset_w_[ThreadMap::Iterations::kStrided];
int offset_h_[ThreadMap::Iterations::kStrided];
private:
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator but DOES NOT scale by the convolution stride. This is needed
/// to compute predicates in the valid() method. The return value of the public at()
/// method is correctly scaled.
CUTLASS_HOST_DEVICE
TensorCoord unscaled_at_() const {
int n = offset_n_[iteration_strided_];
int d = offset_d_[iteration_strided_];
int h = offset_h_[iteration_strided_];
int w = offset_w_[iteration_strided_];
int t = filter_t_;
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - t);
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int z = (d + problem_size_.pad_d - t * problem_size_.dilation_d);
int p = (h + problem_size_.pad_h - r * problem_size_.dilation_h);
int q = (w + problem_size_.pad_w - s * problem_size_.dilation_w);
return TensorCoord(n, z, p, q, filter_k_);
}
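  // Worked example (added comment; values are hypothetical): with pad_d = 1,
  // dilation_d = 1 and filter index t = 1, an input coordinate d = 2 maps to the
  // unscaled output coordinate z = 2 + 1 - 1*1 = 2. With stride_d = 2 this is
  // divisible by the stride, so valid() accepts it and at() scales it to z = 1.
  // For d = 3 the unscaled coordinate is 3, which is not divisible by the stride,
  // so that (d, t) pairing contributes nothing and valid() rejects it.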
public:
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_k_(0),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W);
int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W);
offset_d_[s] = residual / (problem_size_.H * problem_size_.W);
residual = residual % (problem_size_.H * problem_size_.W);
offset_h_[s] = residual / problem_size_.W;
offset_w_[s] = residual % problem_size_.W;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// move to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_k_ += Shape_::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
TensorCoord coord = unscaled_at_();
return TensorCoord(
coord.n(),
coord.d() / problem_size_.stride_d,
coord.h() / problem_size_.stride_h,
coord.w() / problem_size_.stride_w,
coord.c());
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord unscaled_coord = unscaled_at_();
TensorCoord coord = at();
return
!(unscaled_coord.d() % problem_size_.stride_d) &&
!(unscaled_coord.h() % problem_size_.stride_h) &&
!(unscaled_coord.w() % problem_size_.stride_w) &&
coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.Z &&
coord.h() >= 0 && coord.h() < problem_size_.P &&
coord.w() >= 0 && coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
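  // Added note: the predicate combines three checks -- (1) the unscaled coordinate
  // must be divisible by the convolution stride in each spatial dimension (otherwise
  // this filter tap never produced the current input element), (2) the scaled
  // coordinate must fall inside the output extents N x Z x P x Q, and (3) the channel
  // index must be below K. Callers are expected to guard loads from get() with valid().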
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 3703
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic include for CUTLASS.
*/
#pragma once
#include "cutlass/detail/helper_macros.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/// Status code returned by CUTLASS operations
enum class Status {
kSuccess, ///< Operation was successful.
kErrorMisalignedOperand, ///< operands fail alignment requirements.
kErrorInvalidDataType, ///< DataType fails requirement.
kErrorInvalidLayout, ///< Layout fails alignment requirement.
kErrorInvalidProblem, ///< Specified problem size is not supported by operator.
kErrorNotSupported, ///< Operation is not supported on current device.
kErrorWorkspaceNull, ///< The given workspace is null when it is required to be non-null.
kErrorInternal, ///< An error within CUTLASS occurred.
kErrorArchMismatch, ///< CUTLASS runs on a device that it was not compiled for.
kErrorInsufficientDriver, ///< CUTLASS runs with a driver that is too old.
kErrorMemoryAllocation, ///< Kernel launch failed due to insufficient device memory.
kInvalid ///< Status is unspecified.
};
/// Converts a cutlass::Status value to its string representation
CUTLASS_HOST_DEVICE
static char const* cutlassGetStatusString(cutlass::Status status) {
switch (status) {
case cutlass::Status::kSuccess:
return "Success";
case cutlass::Status::kErrorMisalignedOperand:
return "Error Misaligned Operand";
case cutlass::Status::kErrorInvalidDataType:
return "Error Invalid Data Type";
case cutlass::Status::kErrorInvalidLayout:
return "Error Invalid Layout";
case cutlass::Status::kErrorInvalidProblem:
return "Error Invalid Problem";
case cutlass::Status::kErrorNotSupported:
return "Error Not Supported";
case cutlass::Status::kErrorWorkspaceNull:
return "Error Workspace Null";
case cutlass::Status::kErrorInternal:
return "Error Internal";
case cutlass::Status::kErrorInsufficientDriver:
return "Error Insufficient Driver";
case cutlass::Status::kErrorArchMismatch:
return "Error Architecture Mismatch";
case cutlass::Status::kErrorMemoryAllocation:
return "Error Memory Allocation failed";
case cutlass::Status::kInvalid: break;
}
return "Invalid status";
}
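// Illustrative usage sketch (added; not part of the original header). `gemm_op`,
// `arguments`, and `workspace` are hypothetical host-side objects:
//
//   cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
//   if (status != cutlass::Status::kSuccess) {
//     std::cerr << "CUTLASS error: " << cutlassGetStatusString(status) << "\n";
//   }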
////////////////////////////////////////////////////////////////////////////////////////////////////
static const int NumThreadsPerWarp = 32;
static const int NumThreadsPerWarpGroup = 128;
static const int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp;
static const int NumThreadsPerHalfWarp = NumThreadsPerWarp / 2;
static const int NumThreadsPerQuad = 4;
static const int NumThreadsPerQuadPair = NumThreadsPerQuad * 2;
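// Illustrative compile-time checks (added; not part of the original header): the
// derived constants follow directly from the definitions above.
static_assert(NumWarpsPerWarpGroup == 4, "128 threads per warp group / 32 threads per warp");
static_assert(NumThreadsPerHalfWarp == 16, "half of a 32-thread warp");
static_assert(NumThreadsPerQuadPair == 8, "two quads of 4 threads each");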
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to return true when called by thread 0 of threadblock 0.
CUTLASS_HOST_DEVICE bool thread0() {
#if defined(__CUDA_ARCH__)
return (!threadIdx.x && !threadIdx.y && !threadIdx.z) && (!blockIdx.x && !blockIdx.y && !blockIdx.z);
#else
return false;
#endif
}
/// Returns a lane index in the warp. The threads in the warp may not be convergent
CUTLASS_DEVICE
int canonical_lane_idx() {
#if defined(__CUDA_ARCH__)
return threadIdx.x % NumThreadsPerWarp;
#else
return 0;
#endif
}
/// Returns a warp-uniform value indicating the canonical warp index of the calling threads.
/// Threads within the warp must be converged.
CUTLASS_DEVICE
int canonical_warp_idx_sync() {
#if defined(__CUDA_ARCH__)
return __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarp, 0);
#else
return 0;
#endif
}
/// Returns a warp index in the CTA. The threads in the warp may not be convergent.
/// Because it does not sync the warp, it is faster and allows forward progress.
CUTLASS_DEVICE
int canonical_warp_idx() {
#if defined(__CUDA_ARCH__)
return threadIdx.x / NumThreadsPerWarp;
#else
return 0;
#endif
}
/// Returns a warp-uniform value indicating the canonical warp group index of the calling threads.
/// Threads within the warp must be converged.
CUTLASS_DEVICE
int canonical_warp_group_idx() {
#if defined(__CUDA_ARCH__)
return __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0);
#else
return 0;
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/cutlass.h/0 | {
"file_path": "include/cutlass/cutlass.h",
"repo_id": "include",
"token_count": 1922
} | 21 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing pipelined epilogues with bias add and elementwise activation functions.
This collective is now DEPRECATED and will be removed in the next release. Use EVT instead.
*/
#pragma once
#include "sm90_epilogue_tma_warpspecialized.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int StagesC_,
int StagesD_,
int FragmentSize_,
class BlockTileShape_, // (BLK_M,BLK_N,BLK_K)
class EpilogueTileShape_, // (EPI_TILE_M,EPI_TILE_N)
class ElementC_,
class StrideC_,
class ElementD_,
class StrideD_,
class FusionCallbacks_,
class CopyOpG2S_,
class SmemLayoutAtomC_,
class CopyOpS2R_,
class CopyOpS2G_,
class SmemLayoutAtomD_,
class CopyOpR2S_
>
class Sm90EpilogueTmaWarpSpecializedBiasElementwise
: public CollectiveEpilogue<
Sm90TmaWarpSpecialized<StagesC_, StagesD_, FragmentSize_, false, false>,
BlockTileShape_,
EpilogueTileShape_,
ElementC_,
StrideC_,
ElementD_,
StrideD_,
FusionCallbacks_,
CopyOpG2S_,
SmemLayoutAtomC_,
CopyOpS2R_,
CopyOpS2G_,
SmemLayoutAtomD_,
CopyOpR2S_
> {
private:
using Impl =
CollectiveEpilogue<
Sm90TmaWarpSpecialized<StagesC_, StagesD_, FragmentSize_, false, false>,
BlockTileShape_,
EpilogueTileShape_,
ElementC_,
StrideC_,
ElementD_,
StrideD_,
FusionCallbacks_,
CopyOpG2S_,
SmemLayoutAtomC_,
CopyOpS2R_,
CopyOpS2G_,
SmemLayoutAtomD_,
CopyOpR2S_
>;
public:
using DispatchPolicy = Sm90TmaWarpSpecializedBiasElementwise<StagesC_, StagesD_, FragmentSize_>;
using ElementCompute = typename Impl::ThreadEpilogueOp::ElementCompute;
using ElementBias = typename Impl::ThreadEpilogueOp::ElementBias;
using ElementT = typename Impl::ThreadEpilogueOp::ElementAux;
// Constructor inheritance
using Impl::Impl;
// Host side epilogue arguments
struct [[deprecated("use Sm90TmaWarpSpecialized Arguments instead")]]
Arguments {
struct ThreadArgs {
ElementCompute alpha{1};
ElementCompute beta{0};
ElementCompute const *alpha_ptr{nullptr};
ElementCompute const *beta_ptr{nullptr};
} thread;
ElementC_ const* ptr_C{nullptr};
StrideC_ dC{};
ElementD_* ptr_D{nullptr};
StrideD_ dD{};
ElementBias const* ptr_Bias{nullptr};
ElementT* ptr_T{nullptr};
CUTLASS_HOST_DEVICE
operator typename Impl::Arguments() const {
typename Impl::Arguments arguments;
arguments.thread.alpha = thread.alpha;
arguments.thread.beta = thread.beta;
arguments.thread.alpha_ptr = thread.alpha_ptr;
arguments.thread.beta_ptr = thread.beta_ptr;
if constexpr (not cute::is_void_v<ElementBias>) {
arguments.thread.bias_ptr = ptr_Bias;
}
if constexpr (not cute::is_void_v<ElementT>) {
arguments.thread.aux_ptr = ptr_T;
arguments.thread.dAux = dD;
}
arguments.ptr_C = ptr_C;
arguments.dC = dC;
arguments.ptr_D = ptr_D;
arguments.dD = dD;
return arguments;
}
};
};
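// Migration sketch (added; not part of the original header). The conversion operator
// above maps each deprecated field onto the EVT-style epilogue arguments: alpha/beta
// (and their pointers) go to the fusion-callback thread arguments, ptr_Bias becomes
// bias_ptr, and ptr_T becomes aux_ptr with stride dD. Host code such as (names
// hypothetical):
//
//   typename DeprecatedEpilogue::Arguments args{{alpha, beta}, ptr_C, dC, ptr_D, dD,
//                                               ptr_bias, ptr_aux};
//
// can therefore migrate by constructing the equivalent Sm90TmaWarpSpecialized /
// fusion-callback arguments directly, as recommended by the deprecation notice.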
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized_bias_elementwise.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized_bias_elementwise.hpp",
"repo_id": "include",
"token_count": 1858
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing reduction operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a reduction sum to an array of elements.
///
///
template <
typename Element_, ///< Data type used to load and store tensors
int Count ///< Number of elements computed per operation
>
class ReductionOpPlus {
public:
using Element = Element_;
static int const kCount = Count;
using Fragment = Array<Element, kCount>;
using Operator = plus<Fragment>;
/// Host-constructable parameters structure
struct Params { };
private:
/// reduction operator
Operator operator_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
ReductionOpPlus(Params const ¶ms) {
}
/// Computes the elementwise sum of two reduction fragments
CUTLASS_HOST_DEVICE
Fragment operator()(
Fragment const &lhs,
Fragment const &rhs) const {
return operator_(lhs, rhs);
}
};
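// Illustrative usage sketch (added; not part of the original header). The element
// type and vector width below are arbitrary example choices:
//
//   using Reduction = cutlass::epilogue::thread::ReductionOpPlus<float, 4>;
//   Reduction::Params params;
//   Reduction reduce(params);
//   Reduction::Fragment partial_a, partial_b;                  // filled elsewhere
//   Reduction::Fragment total = reduce(partial_a, partial_b);  // elementwise sum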
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/thread/reduction_op.h/0 | {
"file_path": "include/cutlass/epilogue/thread/reduction_op.h",
"repo_id": "include",
"token_count": 925
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "predicated_tile_iterator.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape,
typename WarpShape,
int PartitionsK,
typename ElementOutput,
int ElementsPerAccess,
typename ElementAccumulator
>
struct DefaultThreadMapVoltaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape_,
typename WarpShape_,
int PartitionsK,
typename ElementOutput_,
int ElementsPerAccess
>
struct DefaultThreadMapVoltaTensorOp<
ThreadblockShape_,
WarpShape_,
PartitionsK,
ElementOutput_,
ElementsPerAccess,
half_t> {
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using ElementOutput = ElementOutput_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementAccumulator = half_t;
//
// Definitions
//
struct Detail {
static int const kTensorOpRows = 16;
static int const kWarpSize = 32;
static int const kInterleavedTilesM = WarpShape::kM / 32;
static_assert(
!(ThreadblockShape::kM % WarpShape::kM) &&
!(ThreadblockShape::kN % WarpShape::kN), "Divisibility");
/// Number of warps
using WarpCount = gemm::GemmShape<
ThreadblockShape::kM / WarpShape::kM,
ThreadblockShape::kN / WarpShape::kN,
kPartitionsK
>;
/// Number of participating threads
static int const kThreads = WarpCount::kCount * kWarpSize;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
ThreadblockShape::kN, // column
4, // row
4, // group
WarpCount::kM, // cluster
1 // tile
>;
/// Number of iterations per subspace
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
kInterleavedTilesM, // group
1, // cluster
WarpShape::kM / kTensorOpRows // iterations
>;
};
//
// ThreadMap
//
/// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
using Type = OutputTileOptimalThreadMap <
typename Detail::Shape,
typename Detail::Count,
Detail::kThreads,
kElementsPerAccess,
sizeof_bits<ElementOutput>::value
>;
};
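// Worked example (added; not part of the original header): for a ThreadblockShape of
// 128x128, a WarpShape of 64x64 and kPartitionsK == 1, Detail yields
// WarpCount = <2, 2, 1> (4 warps), kThreads = 4 * 32 = 128,
// kInterleavedTilesM = 64 / 32 = 2, and an iteration count of
// WarpShape::kM / kTensorOpRows = 64 / 16 = 4 per warp accumulator tile.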
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape_,
typename WarpShape_,
int PartitionsK,
typename ElementOutput_,
int ElementsPerAccess
>
struct DefaultThreadMapVoltaTensorOp<
ThreadblockShape_,
WarpShape_,
PartitionsK,
ElementOutput_,
ElementsPerAccess,
float> {
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using ElementOutput = ElementOutput_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementAccumulator = float;
//
// Definitions
//
struct Detail {
static int const kTensorOpRows = 16;
static int const kWarpSize = 32;
static int const kInterleavedTilesM = WarpShape::kM / 32;
static_assert(
!(ThreadblockShape::kM % WarpShape::kM) &&
!(ThreadblockShape::kN % WarpShape::kN), "Divisibility");
/// Number of warps
using WarpCount = gemm::GemmShape<
ThreadblockShape::kM / WarpShape::kM,
ThreadblockShape::kN / WarpShape::kN,
kPartitionsK
>;
/// Number of participating threads
static int const kThreads = WarpCount::kCount * kWarpSize;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
ThreadblockShape::kN, // column
4, // row
4, // group
WarpCount::kM, // cluster
1 // tile
>;
/// Number of iterations per subspace
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
kInterleavedTilesM, // group
1, // cluster
WarpShape::kM / kTensorOpRows // iterations
>;
};
//
// ThreadMap
//
/// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
using Type = OutputTileOptimalThreadMap <
typename Detail::Shape,
typename Detail::Count,
Detail::kThreads,
kElementsPerAccess,
sizeof_bits<ElementOutput>::value
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h",
"repo_id": "include",
"token_count": 2469
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Generic epilogue for implementing certain kinds of fused epilogue behavior.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////////////////////////
class EpilogueFusedVisitorConcept {
public:
static int const kIterations = 1;
static int const kElementsPerAccess = 4;
using ElementOutput = float;
using ElementAccumulator = float;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
/// Arguments structure
struct Arguments { };
/// Params structure
struct Params {
Params() { }
Params(Arguments const &args) { }
};
/// Shared storage
struct SharedStorage { };
public:
CUTLASS_DEVICE
EpilogueFusedVisitorConcept(
Params const ¶ms, ///< Parameters routed to the epilogue
SharedStorage &shared_storage, ///< Shared storage needed by the functors here
MatrixCoord const &problem_size, ///< Problem size of the output
int thread_idx, ///< Thread index within the threadblock
int warp_idx, ///< Warp index within the threadblock
int lane_idx, ///< Lane index within the warp
MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)) { ///< Coordinate offset of the threadblock within the output
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
}
/// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
};
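// Call-sequence sketch (added; not part of the original header): an epilogue drives a
// visitor in the following order for each threadblock tile:
//
//   visitor.begin_epilogue();
//   for (int step = 0; step < kIterations; ++step) {
//     visitor.begin_step(step);
//     // for each accumulator row:    visitor.begin_row(row);
//     // for each accumulator vector: visitor.visit(step, row, col, frag_idx, frag);
//     // at the end of each row:      visitor.end_row(row);
//     visitor.end_step(step);
//   }
//   visitor.end_epilogue();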
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Visitor_, ///< Functor containing fused operations (satisfies EpilogueFusedVisitorConcept)
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(true || !IsEpilogueFunctorHeavy<Visitor_>::value)
>
class EpilogueWithVisitor :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Visitor = Visitor_;
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = Visitor::kElementsPerAccess;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Array type used by output functor
using AccumulatorAccessType = Array<
typename WarpTileIterator::Element, kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
using SharedStorage = typename Base::SharedStorage;
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithVisitor(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.reference(), thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
Visitor & visitor,
AccumulatorTile const &accumulators) { ///< Complete warp-level accumulator tile
visitor.begin_epilogue();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? Visitor::kIterations : 1)
for (int iter_idx = 0; iter_idx < Visitor::kIterations; ++iter_idx) {
//
// Load the source
//
visitor.begin_step(iter_idx);
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<Visitor::kIterations>>::push(
iter_idx, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Iterate over output fragments
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]);
int const kAccumulatorFragmentCount = AccumulatorTile::kElements / (Visitor::kIterations * AccumulatorAccessType::kElements);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
visitor.begin_row(row_idx);
}
visitor.visit(
iter_idx,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
visitor.end_row(row_idx);
}
}
//
// Conclude the step
//
visitor.end_step(iter_idx);
}
visitor.end_epilogue();
}
private:
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
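      // Note (added comment): the braced-init-list above is a C++11 pack-expansion
      // dispatch. For each Seq, `(pos == Seq) && (helper<Seq>(...), 0)` invokes
      // helper<Seq> only when the runtime position matches that compile-time index,
      // so exactly one accumulator slice is advanced and stored per call; `dummy`
      // itself is never read.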
}
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to create an EpilogueWithVisitor from an existing epilogue
template <typename Visitor_, typename Existing_, bool IterationsUnroll = true>
struct EpilogueWithVisitorFromExistingEpilogue {
using Epilogue = EpilogueWithVisitor<
Visitor_,
typename Existing_::Shape,
typename Existing_::WarpMmaOperator,
Existing_::kPartitionsK,
typename Existing_::AccumulatorFragmentIterator,
typename Existing_::WarpTileIterator,
typename Existing_::SharedLoadIterator,
typename Existing_::Padding,
Existing_::kFragmentsPerIteration,
IterationsUnroll
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_with_visitor.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_with_visitor.h",
"repo_id": "include",
"token_count": 4847
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/permute.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/conv/conv2d_problem_size.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: PitchLinearThreadMap)
typename Element_, ///< Element data type
typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>,
typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>
>
class PredicatedTileIteratorDirectConv {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
using ConvProblemSize = typename cutlass::conv::Conv2dProblemSize;
/// Fragment object
using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, kElementsPerAccess>;
static int const kLoadsPerAccess = AccessType::kElements / AccessType::kElements;
using ThreadTileCount = MatrixShape<
ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW
>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorDirect2dConvParams {
using Base = PredicatedTileIteratorDirect2dConvParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout, cutlass::conv::Conv2dProblemSize const &problem_size):
PredicatedTileIteratorDirect2dConvParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
problem_size,
{ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW}
)
{ }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kContiguous;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorDirect2dConvParams params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Element pointer to the tensor in global memory
Element *pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
/// Initial thread output location
int thread_start_n_, thread_start_p_, thread_start_q_;
/// Current threadblock tile index
int tile_index_;
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorDirect2dConvParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorDirectConv(
PredicatedTileIteratorDirect2dConvParams const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params), pointer_(pointer)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
extent_row_ = extent.row();
extent_column_ = extent.column();
// stride dim (PQ)
thread_start_row_ = thread_offset.column();
// contiguous dim (Channels)
thread_start_column_ = threadblock_offset.column() + thread_offset.row();
tile_index_ = threadblock_offset.row();
set_tile_index(0);
}
/// Sets the threadblock tile index and recomputes the thread's start coordinates and predicates
CUTLASS_HOST_DEVICE
void set_tile_index(const int index) {
int residual;
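// Decompose the linear threadblock tile index: the first divmod recovers the batch index n,
// the second splits the remainder into (p, q) tile coordinates (scaled to output coords below).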
params_.pq_divmod(thread_start_n_, residual, tile_index_ + index);
params_.q_divmod(thread_start_p_, thread_start_q_, residual);
// Compute the base output coord of ThreadBlock
thread_start_p_ *= ThreadBlockOutputShape::kH;
thread_start_q_ *= ThreadBlockOutputShape::kW;
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
mask_.predicates[c] = ((thread_start_column_
+ c * ThreadMap::Delta::kContiguous) < extent_column_);
}
// Null pointer performs no accesses
if (!pointer_) {
mask_.clear();
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c;
int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided;
int p = current_row / ThreadBlockOutputShape::kW;
int q = current_row % ThreadBlockOutputShape::kW;
int current_p = thread_start_p_ + p;
int current_q = thread_start_q_ + q;
bool row_guard = (current_p) < params_.P && (current_q) < params_.Q &&
(thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided;
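// Linear row offset of the output pixel (n, p, q); the channel offset is applied separately
// when forming the byte pointer below.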
int output_row_offset =
thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q;
uint8_t *byte_pointer =
reinterpret_cast<uint8_t *>(pointer_) +
LongIndex(output_row_offset) * LongIndex(params_.stride) +
LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) *
sizeof(AccessType) / kElementsPerAccess;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
bool guard = row_guard && mask_.predicates[c];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard);
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c;
int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided;
int p = current_row / ThreadBlockOutputShape::kW;
int q = current_row % ThreadBlockOutputShape::kW;
int current_p = thread_start_p_ + p;
int current_q = thread_start_q_ + q;
bool row_guard = (current_p) < params_.P && (current_q) < params_.Q &&
(thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided;
int output_row_offset =
thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q;
uint8_t *byte_pointer =
reinterpret_cast<uint8_t *>(pointer_) +
LongIndex(output_row_offset) * LongIndex(params_.stride) +
LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) *
sizeof(AccessType) / kElementsPerAccess;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
bool guard = row_guard && mask_.predicates[c];
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard);
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
/// Need to get the thread start column from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirectConv &operator++() {
// do nothing
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
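// Hedged usage sketch (surrounding names are illustrative): an epilogue typically constructs
// one iterator per threadblock and stores one fragment per output tile:
//
//   PredicatedTileIteratorDirectConv<ThreadMap, Element, ThreadShape, BlockShape>
//       iterator(params, ptr_D, extent, thread_idx, threadblock_offset);
//   iterator.set_tile_index(tile_idx);   // select the threadblock output tile
//   iterator.store(fragment);            // predicated store of the current fragment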
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h",
"repo_id": "include",
"token_count": 4873
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element, ///< data type of element to be written
typename Layout ///< target shared memory layout
>
class TileIteratorTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_ ///< data type of element to be written
>
class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorLayout = Layout;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of times this iterator can be incremented
using TileIterations = typename Policy::TileIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
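// Each quad of four lanes covers one row; lanes within the quad take consecutive vector
// accesses along the columns.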
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerAccess
};
pointer_ += layout_({thread_offset_.row(), thread_offset_.column() / Policy::kElementsPerAccess});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column() / Policy::kElementsPerAccess
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
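// Accesses for successive column MMA tiles are spaced kLanesInQuad vector accesses apart;
// the four lanes of a quad fill the intervening slots.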
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
frag_ptr[n] = pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of element to be written
int InterleavedK ///< number of interleaved k
>
class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK> > {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::ColumnMajorInterleaved<InterleavedK>;
using TensorLayout = Layout; ///< shared memory tensor ref layout
using TensorRef = TensorRef<Element, TensorLayout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
// Policy::kRowsPerIteration,
WarpShape::kM,
InterleavedK
>;
/// This is the fragment size produced by one tile
using Fragment = Array<
Element,
Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction
* Policy::kElementsPerIteration>;
/// This is the fragment size produced by one iteration
// using Fragment = Array<
// Element, Policy::kElementsPerIteration >;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
using TileIterations = typename Policy::TileIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerIteration>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
TensorLayout layout_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0]) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerIteration
};
pointer_ += (layout_({thread_offset_.row(), thread_offset_.column()}) / Policy::kElementsPerAccess);
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += (layout_({
coord_offset.row(),
coord_offset.column()
}) / Policy::kElementsPerAccess);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) {
AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < Policy::kAccessPerIteration; ++a) {
ptr[a + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n * Policy::kAccessPerIteration + a];
// printf("store thread %d, address %p, bank %ld\n", threadIdx.x, pointer_+a+n*Detail::kLanesInQuad,
// ((long long)(pointer_+a+n*Detail::kLanesInQuad)>>2)&0x1f);
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) {
AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < Policy::kAccessPerIteration; ++a) {
frag_ptr[n * Policy::kAccessPerIteration + a] = ptr[a + pointer_offset / Policy::kElementsPerAccess];
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator++() {
return add_tile_offset({0, 1});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of element to be written
typename Layout_
>
class TileIteratorTensorOpCanonical {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = Layout_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
static int const kAccessSize = 1;
static int const kAccessCount = Policy::kElementsPerAccess / kAccessSize;
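// Scalar (one-element) accesses let partial tiles be predicated per element against the
// tensor extent (see divisible_ and extent_ below).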
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, kAccessSize>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Guard to indicate whether the shape is divisible
bool divisible_;
/// Extent of the output tensor
MatrixCoord extent_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0]),
divisible_(true),
extent_(WarpShape::kM, WarpShape::kN) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerAccess
};
pointer_ += layout_({thread_offset_.row(), thread_offset_.column()});
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical(
TensorRef const &ref,
TensorCoord const &extent,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0]),
divisible_(false),
extent_(extent) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerAccess
};
pointer_ += layout_({thread_offset_.row(), thread_offset_.column()});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column()
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < kAccessCount; ++a) {
int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a;
int frag_idx = n * kAccessCount + a;
int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
pointer_[ptr_idx] = frag_ptr[frag_idx];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < kAccessCount; ++a) {
int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a;
int frag_idx = n * kAccessCount + a;
int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
frag_ptr[frag_idx] = pointer_[ptr_idx];
}
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tile_iterator_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_tensor_op.h",
"repo_id": "include",
"token_count": 6931
} | 27 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm80CpAsyncUnpredicated<Stages>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_
>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm80CpAsyncUnpredicated<Stages>;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
// Follow the change in TestSmall: TileShape switched to CtaShape
// For the sm80 arch, CtaShape should equal TileShape
using CtaShape_MNK = TileShape;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(DispatchPolicy::Stages >= 2, "CpAsync mainloop must have at least 2 stages in the pipeline.");
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a collective-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3,
"MainloopSm80CpAsync must have a pipeline mode in the smem layout.");
static_assert(cute::rank(SmemLayoutB{}) == 3,
"MainloopSm80CpAsync must have a pipeline mode in the smem layout.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
CUTE_STATIC_ASSERT_V(size<0>(gA) == size<0>(sA)); // BLK_M
CUTE_STATIC_ASSERT_V(size<1>(gA) == size<1>(sA)); // BLK_K
CUTE_STATIC_ASSERT_V(size<0>(gB) == size<0>(sB)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(gB) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_A;
GmemTiledCopyB gmem_tiled_copy_B;
auto gmem_thr_copy_A = gmem_tiled_copy_A.get_slice(thread_idx);
auto gmem_thr_copy_B = gmem_tiled_copy_B.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_A.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_A.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_B.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_B.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
//
// PREDICATES
//
(void) residue_mnk;
//assert(residue_mnk == make_tuple(0,0,0));
//
// PREFETCH
//
// Start async loads for all pipes but the last
CUTLASS_PRAGMA_UNROLL
for (int k_pipe = 0; k_pipe < DispatchPolicy::Stages-1; ++k_pipe) {
copy(gmem_tiled_copy_A, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,k_pipe));
copy(gmem_tiled_copy_B, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,k_pipe));
cp_async_fence();
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_iter; }
}
//
// MMA Atom partitioning
//
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA(_,_,0)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB(_,_,0)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
CUTE_STATIC_ASSERT_V(size(gmem_tiled_copy_A) == size(tiled_mma));
CUTE_STATIC_ASSERT_V(size(gmem_tiled_copy_B) == size(tiled_mma));
//
// Copy Atom retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCsA = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K,PIPE)
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
auto smem_tiled_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma);
auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx);
Tensor tCsB = smem_thr_copy_B.partition_S(sB); // (CPY,CPY_N,CPY_K,PIPE)
Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K
//
// PIPELINED MAIN LOOP
//
// Current pipe index in smem to read from
int smem_pipe_read = 0;
// Current pipe index in smem to write to
int smem_pipe_write = DispatchPolicy::Stages-1;
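// Stages [0, Stages-1) were prefetched above, so writes begin at the single empty stage;
// afterwards each new gmem->smem copy reuses the stage that has just been read.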
Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read);
Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Size of the register pipeline
auto K_BLOCK_MAX = size<2>(tCrA);
// PREFETCH register pipeline
if (K_BLOCK_MAX > 1) {
// Wait until our first prefetched tile is loaded in
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
// Prefetch the first rmem from the first k-tile
copy(smem_tiled_copy_A, tCsA_p(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{}));
copy(smem_tiled_copy_B, tCsB_p(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{}));
}
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > -(DispatchPolicy::Stages-1); --k_tile_count)
{
// Pipeline the outer products with a static for loop.
//
// Note, the for_each() function is required here to ensure `k_block` is of type Int<x>.
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Slice the smem_pipe_read smem
tCsA_p = tCsA(_,_,_,smem_pipe_read);
tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Commit the smem for smem_pipe_read
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
}
// Load A, B shmem->regs for k_block+1
auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(smem_tiled_copy_A, tCsA_p(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(smem_tiled_copy_B, tCsB_p(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
// Copy gmem to smem before computing gemm on each k-pipe
if (k_block == 0)
{
copy(gmem_tiled_copy_A, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write));
copy(gmem_tiled_copy_B, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write));
cp_async_fence();
if (k_tile_count > 0) { ++k_tile_iter; }
// Advance the pipe -- Doing it here accounts for K_BLOCK_MAX = 1 (no rmem pipe)
smem_pipe_write = smem_pipe_read;
++smem_pipe_read;
smem_pipe_read = (smem_pipe_read == DispatchPolicy::Stages) ? 0 : smem_pipe_read;
}
// Transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k_block
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_
>
struct CollectiveMma<
MainloopSm80CpAsync<Stages>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_
>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm80CpAsync<Stages>;
using TileShape = TileShape_;
// Follow the change in TestSmall: TileShape switched to CtaShape
// On legacy architectures, CtaShape should be the same as TileShape
using CtaShape_MNK = TileShape;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(DispatchPolicy::Stages >= 2, "CpAsync mainloop must have at least 2 stages in the pipeline.");
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a collective-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA, // (BLK_M, BLK_K, K_TILES)
TensorB gB, // (BLK_N, BLK_K, K_TILES)
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
CUTE_STATIC_ASSERT_V(size<0>(gA) == size<0>(sA)); // BLK_M
CUTE_STATIC_ASSERT_V(size<1>(gA) == size<1>(sA)); // BLK_K
CUTE_STATIC_ASSERT_V(size<0>(gB) == size<0>(sB)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(gB) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
gA.data() = &gA(0, get<2>(residue_mnk), 0);
gB.data() = &gB(0, get<2>(residue_mnk), 0);
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_A;
GmemTiledCopyB gmem_tiled_copy_B;
auto gmem_thr_copy_A = gmem_tiled_copy_A.get_slice(thread_idx);
auto gmem_thr_copy_B = gmem_tiled_copy_B.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_A.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_A.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_B.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_B.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
//
// PREDICATES
//
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
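// The zero stride in the K mode broadcasts a single predicate per M (or N) coordinate
// across all K accesses within the tile.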
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_A.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_B.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
//
// PREFETCH
//
// Clear the smem tiles to account for predicated off loads
clear(tAsA);
clear(tBsB);
// Start async loads for 0th k-tile, where we take care of the k residue
{
constexpr int k_pipe = 0;
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tAsA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_A, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,k_pipe));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBsB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_B, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,k_pipe));
}
}
cp_async_fence();
++k_tile_iter;
--k_tile_count;
}
// Start async loads for 1st k-tile onwards, no k-residue handling needed
CUTLASS_PRAGMA_UNROLL
for (int k_pipe = 1; k_pipe < DispatchPolicy::Stages-1; ++k_pipe) {
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_A, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,k_pipe)); // CpAsync
copy_if(gmem_tiled_copy_B, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,k_pipe)); // CpAsync
cp_async_fence();
++k_tile_iter;
--k_tile_count;
}
//
// MMA Atom partitioning
//
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA(_,_,0)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB(_,_,0)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCsA = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K,PIPE)
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
auto smem_tiled_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma);
auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx);
Tensor tCsB = smem_thr_copy_B.partition_S(sB); // (CPY,CPY_N,CPY_K,PIPE)
Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K
//
// PIPELINED MAIN LOOP
//
// Current pipe index in smem to read from
int smem_pipe_read = 0;
// Current pipe index in smem to write to
int smem_pipe_write = DispatchPolicy::Stages-1;
Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read);
Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Size of the register pipeline
auto K_BLOCK_MAX = size<2>(tCrA);
// PREFETCH register pipeline
if (K_BLOCK_MAX > 1) {
// Wait until our first prefetched tile is loaded in
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
// Prefetch the first rmem from the first k-tile
copy(smem_tiled_copy_A, tCsA_p(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{}));
copy(smem_tiled_copy_B, tCsB_p(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{}));
}
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > -(DispatchPolicy::Stages-1); --k_tile_count)
{
// Pipeline the outer products with a static for loop.
//
// Note, the for_each() function is required here to ensure `k_block` is of type Int<N>.
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Slice the smem_pipe_read smem
tCsA_p = tCsA(_,_,_,smem_pipe_read);
tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Commit the smem for smem_pipe_read
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
}
// Load A, B shmem->regs for k_block+1
auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(smem_tiled_copy_A, tCsA_p(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(smem_tiled_copy_B, tCsB_p(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
// Copy gmem to smem before computing gemm on each k-pipe
if (k_block == 0)
{
// Set all predicates to false if we are going to overshoot bounds
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_A, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write));
copy_if(gmem_tiled_copy_B, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write));
cp_async_fence();
++k_tile_iter;
// Advance the pipe -- Doing it here accounts for K_BLOCK_MAX = 1 (no rmem pipe)
smem_pipe_write = smem_pipe_read;
++smem_pipe_read;
smem_pipe_read = (smem_pipe_read == DispatchPolicy::Stages) ? 0 : smem_pipe_read;
}
// Transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k_block
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm80_mma_multistage.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm80_mma_multistage.hpp",
"repo_id": "include",
"token_count": 13161
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined rank-k update kernel. Batched execution is not supported;
           split-K is available only with serial reduction (kSplitKSerial).
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/rank_k_universal.h"
#include "cutlass/gemm/kernel/default_rank_k_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by SYRK
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation
ComplexTransform TransformA = ComplexTransform::kNone,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric>
class RankK {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = TransformA;
static BlasMode const kBlasMode = BlasMode_;
static int const kUpdateRank = 1;
/// Define the kernel
using RankKkernel = typename kernel::DefaultRankKUniversal<
ElementA,
LayoutA,
kTransformA,
kAlignmentA,
ElementC,
LayoutC,
kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator,
kBlasMode
>::RankKkernel;
using Arguments = typename RankKkernel::Arguments;
private:
/// Kernel parameters object
typename RankKkernel::Params params_;
public:
/// Constructs the SYRK.
RankK() { }
/// Determines whether the SYRK can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = RankKkernel::can_implement(args);
if (FillModeC != FillMode::kLower && FillModeC != FillMode::kUpper) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
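  // Worked example (illustrative numbers, not part of the original header):
  // for a 1024x1024 rank-k update with a 128x128 threadblock tile, tiled_shape
  // is 8x8; with kSplitKSerial enabled and batch_count > 1 (batch_count acts
  // as the number of split-K slices), the workspace holds 8 * 8 = 64 ints
  // (256 bytes) of per-output-tile semaphores. Otherwise no workspace is needed.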
/// Initializes SYRK state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
// Initialize the Params structure
params_ = typename RankKkernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename RankKkernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<RankKkernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(RankKkernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename RankKkernel::SharedStorage));
cutlass::Kernel<RankKkernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
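// Hedged usage sketch (not part of the original header): the typical host-side
// call sequence for the device-level operator defined above. The Arguments
// fields come from kernel::RankKUniversal and are assumed to be populated by
// the caller; make_rank_k_args() is a hypothetical helper, `stream` is an
// existing cudaStream_t, and the element/layout choices are arbitrary examples.
//
//   using RankKOp = cutlass::gemm::device::RankK<
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // ElementA, LayoutA
//       float, cutlass::layout::ColumnMajor,             // ElementC, LayoutC
//       cutlass::FillMode::kLower>;                      // fill mode of C
//
//   RankKOp op;
//   RankKOp::Arguments args = make_rank_k_args(/* ... */);  // hypothetical helper
//
//   if (RankKOp::can_implement(args) == cutlass::Status::kSuccess) {
//     size_t workspace_bytes = RankKOp::get_workspace_size(args);
//     void *workspace = nullptr;                         // cudaMalloc if non-zero
//     if (op.initialize(args, workspace, stream) == cutlass::Status::kSuccess) {
//       op.run(stream);                                  // or simply: op(stream)
//     }
//   }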
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output: maps to the row-major underlying operator with the fill mode of C inverted.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by RankK update kernel
typename Operator_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
>
class RankK<ElementA_, LayoutA_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
FillModeC, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA,
SplitKSerial, Operator_, TransformA, BlasMode_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static BlasMode const kBlasMode = BlasMode_;
static int const kUpdateRank = 1;
// Complex transform for input A matrices (function on input layout)
static ComplexTransform const kTransformA = TransformA;
/// Define the kernel
using UnderlyingOperator = typename cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
layout::RowMajor,
InvertFillMode<FillModeC>::mode,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentA,
kSplitKSerial,
Operator,
kTransformA,
kBlasMode
>;
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
using RankKkernel = typename UnderlyingOperator::RankKkernel;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the RankK.
RankK() { }
  /// Helper to construct a transposed equivalent for the underlying RankK operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args;
}
/// Determines whether the RankK can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes RankK state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
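// Illustrative note (restating the aliases above, not part of the original
// header): a column-major instantiation is carried out by the row-major
// operator, with the fill mode of C inverted.
//
//   // RankK<ElementA, LayoutA, ElementC, layout::ColumnMajor, FillMode::kLower, ...>
//   // uses, as its UnderlyingOperator,
//   // RankK<ElementA, LayoutA, ElementC, layout::RowMajor,    FillMode::kUpper, ...>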
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/rank_k.h/0 | {
"file_path": "include/cutlass/gemm/device/rank_k.h",
"repo_id": "include",
"token_count": 5606
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default sparse GEMM with visitor.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse.h"
#include "cutlass/gemm/kernel/sparse_gemm_with_visitor.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
#include "cutlass/gemm/threadblock/default_sparse_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
    /// Fusion callbacks for the epilogue
typename FusionCallbacks,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of stages used in the pipelined epilogue
int EpilogueStages = 1>
struct DefaultSparseGemmWithVisitor;
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
    /// Fusion callbacks for the epilogue
typename FusionCallbacks,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of stages used in the pipelined epilogue
int EpilogueStages>
struct DefaultSparseGemmWithVisitor<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
FusionCallbacks, ThreadblockSwizzle, Stages, Operator,
EpilogueStages> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
  static constexpr int kAlignmentC = 128 / sizeof_bits<ElementC>::value;
using ElementEpilogue = ElementAccumulator;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
using EpilogueOutputOp =
typename epilogue::thread::LinearCombination<
ElementC, kAlignmentC,
ElementAccumulator, ElementEpilogue>;
using BaseEpilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK,
EpilogueOutputOp, EpilogueOutputOp::kCount>::Epilogue;
// Define epilogue
using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks<
BaseEpilogue,
FusionCallbacks,
EpilogueStages>;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::SparseGemmWithEpilogueVisitor<Mma, Epilogue, ThreadblockSwizzle>;
};
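// Worked example (illustrative, not part of the original header) for the
// epilogue alignment chosen in the specialization above, which targets one
// full 128-bit store per access:
//
//   kAlignmentC = 128 / sizeof_bits<ElementC>::value
//     ElementC = float   ->  128 / 32 = 4 elements
//     ElementC = half_t  ->  128 / 16 = 8 elements
//     ElementC = int8_t  ->  128 / 8  = 16 elements
//
// EpilogueOutputOp::kCount then takes the same value via LinearCombination.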
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h",
"repo_id": "include",
"token_count": 2603
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base scheduler for grouped problems
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing the type of scheduling to perform for the ProblemVisitor
enum class GroupScheduleMode {
// Perform all scheduling on device
kDeviceOnly,
// Precompute on the host the full sequence of problems to access
kHostPrecompute
};
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ProblemSizeHelper,
typename ThreadblockShape_>
struct BaseGroupedProblemVisitor {
using ThreadblockShape = ThreadblockShape_;
struct ProblemInfo {
static int32_t const kNoPrefetchEntry = -1;
int32_t problem_idx;
int32_t problem_start;
CUTLASS_DEVICE
ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {}
CUTLASS_DEVICE
ProblemInfo(int32_t problem_idx_, int32_t problem_start_) :
problem_idx(problem_idx_), problem_start(problem_start_) {}
};
struct Params {
cutlass::gemm::GemmCoord const *problem_sizes;
int32_t problem_count;
void const *workspace;
int32_t tile_count;
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const *problem_sizes,
int32_t problem_count,
void const *workspace = nullptr,
int32_t tile_count = 0
):
problem_sizes(problem_sizes),
problem_count(problem_count),
workspace(workspace),
tile_count(tile_count)
{}
};
Params params;
int32_t tile_idx;
int32_t problem_tile_start;
int32_t problem_idx;
//
// Methods
//
CUTLASS_DEVICE
BaseGroupedProblemVisitor(
Params const ¶ms_,
int32_t block_idx
):
params(params_),
tile_idx(block_idx),
problem_tile_start(0),
problem_idx(0)
{}
/// Get the grid shape
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return ProblemSizeHelper::grid_shape(problem);
}
/// Gets the global tile index
CUTLASS_HOST_DEVICE
int32_t tile_index() const {
return tile_idx;
}
/// Gets the index of the problem
CUTLASS_HOST_DEVICE
int32_t problem_index() const {
return problem_idx;
}
CUTLASS_HOST_DEVICE
int32_t threadblock_idx() const {
return tile_idx - problem_tile_start;
}
CUTLASS_DEVICE
void advance(int32_t grid_size) {
tile_idx += grid_size;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
ProblemSizeHelper::possibly_transpose_problem(problem);
}
/// Returns the problem size for the current problem
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord problem_size() const {
GemmCoord problem = params.problem_sizes[problem_idx];
ProblemSizeHelper::possibly_transpose_problem(problem);
return problem;
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return ProblemSizeHelper::tile_count(grid);
}
static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) {
int32_t total_tiles = 0;
for (int32_t i = 0; i < problem_count; ++i) {
auto problem = host_problem_sizes_ptr[i];
possibly_transpose_problem(problem);
auto grid = grid_shape(problem);
total_tiles += tile_count(grid);
}
return total_tiles;
}
};
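// Hedged host-side sketch (not part of the original header) of what
// group_tile_count() evaluates to for a plain GEMM-style ProblemSizeHelper,
// where grid_shape() is the ceiling division of (M, N) by the threadblock
// tile and tile_count() is the product of the two grid dimensions. The
// 128x128 tile is an arbitrary example value.
//
//   inline int32_t example_group_tile_count(
//       cutlass::gemm::GemmCoord const *problems, int32_t problem_count,
//       int tb_m = 128, int tb_n = 128) {
//     int32_t total = 0;
//     for (int32_t i = 0; i < problem_count; ++i) {
//       int32_t grid_m = (problems[i].m() + tb_m - 1) / tb_m;
//       int32_t grid_n = (problems[i].n() + tb_n - 1) / tb_n;
//       total += grid_m * grid_n;
//     }
//     return total;
//   }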
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ProblemSizeHelper,
typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount
>
struct GroupedProblemVisitor;
/////////////////////////////////////////////////////////////////////////////////////////////////
// ProblemVisitor that performs all scheduling on device
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kDeviceOnly,
PrefetchTileCount,
ThreadCount>: public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
static int const kThreadCount = ThreadCount;
static bool const kRequiresPrecomputation = false;
static int const kThreadsPerWarp = 32;
struct SharedStorage {};
// Final tile of the problem loaded by this thread. Each thread will hold
// a separate value.
int32_t problem_ending_tile;
SharedStorage &shared_storage;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
problem_ending_tile(0),
shared_storage(shared_storage_)
{
this->problem_idx = -1 * kThreadsPerWarp;
this->problem_tile_start = 0;
}
CUTLASS_DEVICE
bool next_tile() {
// Check whether the tile to compute is within the range of the current problem.
int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp);
if (this->tile_idx < problem_tile_end) {
return true;
}
// Check whether the tile to compute is within the current group of problems fetched by the warp.
// The last tile for this group is the final tile of the problem held by the final thread in the warp.
int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
// Keep the starting problem for this group in `problem_idx`. This is done to reduce
// register pressure. The starting problem for this group is simply the first problem
// in the group most recently fetched by the warp.
int32_t &group_problem_start = this->problem_idx;
group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp;
// Keep the starting tile for this group in `problem_tile_start`. This is done to reduce
// register pressure.
int32_t &group_tile_start = this->problem_tile_start;
// Each thread in the warp processes a separate problem to advance until
    // reaching a problem whose starting tile is less than tile_idx.
while (group_tile_end <= this->tile_idx) {
group_problem_start += kThreadsPerWarp;
if (group_problem_start > this->params.problem_count) {
return false;
}
// Since `group_tile_start` is a reference to `this->problem_tile_start`, this
// also sets `this->problem_tile_start`. The fact that `this->problem_tile_start`
// is also set here is used later in `next_tile`.
group_tile_start = group_tile_end;
int lane_idx = threadIdx.x % kThreadsPerWarp;
int32_t lane_problem = group_problem_start + lane_idx;
// Compute the number of tiles in the problem assigned to each thread.
problem_ending_tile = 0;
if (lane_problem < this->params.problem_count) {
cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem];
this->possibly_transpose_problem(problem);
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
problem_ending_tile = this->tile_count(grid);
}
// Compute a warp-wide inclusive prefix sum to compute the ending tile index of
// each thread's problem.
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kThreadsPerWarp; i <<= 1) {
int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i);
if (lane_idx >= i) {
problem_ending_tile += val;
}
}
// The total tile count for this group is now in the final position of the prefix sum
int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
problem_ending_tile += group_tile_start;
group_tile_end += tiles_in_group;
}
    // The next problem to process is the first one whose ending tile position
    // is greater than the tile index.
int32_t problem_idx_in_group =
__popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx));
this->problem_idx = group_problem_start + problem_idx_in_group;
// The starting tile for this problem is the ending tile of the previous problem. In cases
// where `problem_idx_in_group` is the first problem in the group, we do not need to reset
// `problem_tile_start`, because it is set to the previous group's ending tile in the while
// loop above.
if (problem_idx_in_group > 0) {
this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1);
}
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
return 0;
}
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {}
};
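// Standalone sketch (illustrative, not part of the original header) of the
// warp-wide inclusive prefix sum used in next_tile() above: after the loop,
// lane i holds the sum of the inputs of lanes 0..i, so the last lane holds
// the warp-wide total.
//
//   __device__ int32_t warp_inclusive_prefix_sum(int32_t value) {
//     int lane_idx = threadIdx.x % 32;
//     CUTLASS_PRAGMA_UNROLL
//     for (int i = 1; i < 32; i <<= 1) {
//       int32_t addend = __shfl_up_sync(0xffffffff, value, i);
//       if (lane_idx >= i) {
//         value += addend;
//       }
//     }
//     return value;
//   }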
/////////////////////////////////////////////////////////////////////////////////////////////////
// Precomputes schedule on host and prefetches into shared memory
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kHostPrecompute,
PrefetchTileCount,
ThreadCount> : public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
static_assert(PrefetchTileCount > 0,
"GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory");
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
using ProblemInfo = typename Base::ProblemInfo;
static bool const kRequiresPrecomputation = true;
static int const kPrefetchTileCount = PrefetchTileCount;
static int const kThreadCount = ThreadCount;
struct SharedStorage {
// Sequence of problem IDs and starting tiles to compute
cutlass::Array<ProblemInfo, kPrefetchTileCount> prefetched_problems;
};
int32_t tiles_computed;
int32_t iterations_per_block;
int32_t block_load_start;
SharedStorage &shared_storage;
ProblemInfo const *problem_info_ptr;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
tiles_computed(0),
shared_storage(shared_storage_),
problem_info_ptr(reinterpret_cast<ProblemInfo const*>(params_.workspace))
{
iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x;
block_load_start = iterations_per_block * block_idx;
// Start prefetching the first set of tiles to compute
prefetch_tiles();
}
CUTLASS_DEVICE
bool next_tile() {
if (this->tile_idx >= this->params.tile_count) {
return false;
}
int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount);
if (prefetch_idx == 0) {
// Ensure all previous stores to shared memory have been completed
__syncthreads();
}
auto problem_info = shared_storage.prefetched_problems[prefetch_idx];
++tiles_computed;
if ((tiles_computed % kPrefetchTileCount) == 0) {
// Begin prefetching next set of tiles. Synchronize first to ensure that
// we don't overwrite the current buffer while someone else is using it.
__syncthreads();
prefetch_tiles();
}
this->problem_idx = problem_info.problem_idx;
this->problem_tile_start = problem_info.problem_start;
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count);
return sizeof(ProblemInfo) * entries_per_block * block_count;
}
#if !defined(__CUDACC_RTC__)
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {
ProblemInfo* host_problem_info_ptr = reinterpret_cast<ProblemInfo*>(host_workspace_ptr);
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count;
int tile = 0;
int start_tile = 0;
for (int p_idx = 0; p_idx < problem_count; ++p_idx) {
auto problem = host_problem_sizes_ptr[p_idx];
Base::possibly_transpose_problem(problem);
auto grid = Base::grid_shape(problem);
int tiles = Base::tile_count(grid);
ProblemInfo problem_info(p_idx, start_tile);
for (int i = 0; i < tiles; ++i, ++tile) {
host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / block_count)] = problem_info;
}
start_tile += tiles;
}
}
#endif
private:
CUTLASS_DEVICE
void prefetch_tiles() {
CUTLASS_PRAGMA_UNROLL
for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) {
int32_t offset = threadIdx.x + i;
if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) {
shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset];
}
}
}
};
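// Worked example (illustrative, not part of the original header) of the
// round-robin workspace layout written by host_precompute() above, for
// block_count = 4 and 8 total tiles (entries_per_block = 2):
//
//   global tile t  ->  workspace slot  entries_per_block * (t % 4) + (t / 4)
//
//   tiles 0, 4  ->  slots 0, 1   (consumed by block 0)
//   tiles 1, 5  ->  slots 2, 3   (consumed by block 1)
//   tiles 2, 6  ->  slots 4, 5   (consumed by block 2)
//   tiles 3, 7  ->  slots 6, 7   (consumed by block 3)
//
// This matches the device side, where block b reads starting at
// block_load_start = iterations_per_block * b and advance() strides the
// global tile index by gridDim.x.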
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/grouped_problem_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/grouped_problem_visitor.h",
"repo_id": "include",
"token_count": 6168
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default template for a Blocked-Ell MMA.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
#include "cutlass/gemm/threadblock/ell_mma_pipelined.h"
#include "cutlass/gemm/threadblock/ell_mma_multistage.h"
#include "cutlass/transform/threadblock/ell_predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false
>
struct DefaultEllMma;
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass Simt)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassSimt, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<float, LayoutA, kAlignmentA, float, LayoutB,
kAlignmentB, float, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float,
LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, float,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
    /// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2,
Operator, true> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
"Alignment must match thread data map's vector length");
static_assert(kAlignmentB ==128 / sizeof_bits<ElementB>::value,
"Alignment must match thread data map's vector length");
// Define iterators over tiles from the A operand
using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA,
LayoutA, 1, typename MmaCore::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB,
LayoutB, 0, typename MmaCore::IteratorThreadMapB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>,
typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, Operator>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false> {
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
    /// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator, true> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for SIMT IDP4A Kernels
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Operation performed by GEMM
typename Operator,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape>
struct DefaultEllMma<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, 2,
Operator, false> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
static const bool transposeA = cutlass::platform::is_same< LayoutA, layout::ColumnMajor >::value;
static const bool transposeB = cutlass::platform::is_same< LayoutB, layout::RowMajor >::value;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
OperatorClass, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// Specialization for Wmma TensorOp operator with a two-stage pipeline
template <
    /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for Wmma TensorOp operator with a single-stage pipeline
template <
    /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 1, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 1, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped singlestage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
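// Hypothetical instantiation sketch (not part of the original header). The concrete types
// below are assumptions chosen only to illustrate the parameter order of the Tensor Op
// specialization above; a real build must pick a combination supported by DefaultMmaCore and
// EllMmaMultistage on the target architecture.
//
//   using ExampleEllMma = cutlass::gemm::threadblock::DefaultEllMma<
//       cutlass::half_t, cutlass::layout::RowMajor,    8,   // A: element, layout, alignment
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,   // B: element, layout, alignment
//       float,           cutlass::layout::RowMajor,         // accumulator element, C layout
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,              // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                 // instruction shape
//       3, cutlass::arch::OpMultiplyAdd, false>;
//   using ExampleThreadblockMma = typename ExampleEllMma::ThreadblockMma;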
| include/cutlass/gemm/threadblock/default_ell_mma.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_ell_mma.h",
"repo_id": "include",
"token_count": 10635
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates implementing warp-level per-channel layernorm scale and bias before
           matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentVarMean, typename FragmentGammaBeta>
struct LayernormScaleBiasTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumVarMean = FragmentVarMean::kElements;
static int const NumGammaBeta = FragmentGammaBeta::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns and 2 rows
static int const MmaCols = 2;
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using VarMeanOperand = Array<__half2, MmaScaleBiasPair>;
using GammaBetaOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations,
VarMeanOperand const &var_mean,
GammaBetaOperand const &gamma_beta) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_var_mean = reinterpret_cast<uint32_t const *>(&var_mean);
uint32_t const *ptr_gamma_beta = reinterpret_cast<uint32_t const *>(&gamma_beta);
    // Fuse the normalization and the scale/bias into two packed f16x2 FMAs:
    //   x = var_mean[0]   * x + var_mean[1]
    //   x = gamma_beta[0] * x + gamma_beta[1]
    // Each FMA operates on a pair of FP16 values, so the channel count C must be even.
asm volatile(
"{\n\t"
" fma.rn.f16x2 %0, %1, %2, %3;\n"
" fma.rn.f16x2 %0, %4, %0, %5;\n"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_var_mean[0]), "r"(ptr_activations[0]),
"r"(ptr_var_mean[1]),
"r"(ptr_gamma_beta[0]), "r"(ptr_gamma_beta[1]));
#else
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentVarMean const &var_mean,
FragmentGammaBeta const &gamma_beta) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
VarMeanOperand const *ptr_var_mean =
reinterpret_cast<VarMeanOperand const *>(&var_mean);
GammaBetaOperand const *ptr_gamma_beta =
reinterpret_cast<GammaBetaOperand const *>(&gamma_beta);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i],
ptr_var_mean[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows],
ptr_gamma_beta[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
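// Illustration only (not part of the original header). Per element, the two packed f16x2 FMAs
// in LayernormScaleBiasTransform compute
//     out = gamma_beta[0] * (var_mean[0] * x + var_mean[1]) + gamma_beta[1],
// i.e. a normalization followed by a scale and bias. A scalar reference sketch, assuming the
// caller has already folded its statistics into the (scale, shift) pair:
CUTLASS_HOST_DEVICE
float layernorm_scale_bias_reference(float x, float scale, float shift, float gamma, float beta) {
  return gamma * (scale * x + shift) + beta;
}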
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/layernorm_scale_bias_transform.h/0 | {
"file_path": "include/cutlass/gemm/warp/layernorm_scale_bias_transform.h",
"repo_id": "include",
"token_count": 1939
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using integer types smaller than one byte in host or
device code.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_size.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int Bits, bool Signed = true>
struct integer_subbyte {
/// Storage type
using Storage = uint8_t;
/// Number of bits
static_assert(Bits <= 8*sizeof(Storage), "Require a subbyte of bits in integer_subbyte");
/// External type
using xint_t = typename platform::conditional<Signed, int, unsigned>::type;
/// Bitmask for truncation from larger integers
static constexpr Storage bits_mask_ = Storage(Storage(-1) >> (8 - Bits));
/// Bitmask for the sign bit
static constexpr Storage sign_mask_ = Storage((Signed ? 1 : 0) << (Bits - 1));
//
// Data members
//
Storage storage;
//
// Methods
//
/// No operation
integer_subbyte() = default;
/// Conversion from integer type
CUTLASS_HOST_DEVICE explicit
integer_subbyte(int value)
: storage(reinterpret_cast<Storage const&>(value) & bits_mask_) {}
CUTLASS_HOST_DEVICE explicit
integer_subbyte(unsigned value)
: storage(reinterpret_cast<Storage const&>(value) & bits_mask_) {}
CUTLASS_HOST_DEVICE explicit
integer_subbyte(double value) {
xint_t tmp = static_cast<xint_t>(value);
storage = reinterpret_cast<Storage const &>(tmp) & bits_mask_;
}
/// Convert to int or unsigned
CUTLASS_HOST_DEVICE
operator xint_t() const {
if (sign_mask_ & storage) { // Sign extend
return xint_t(storage) | ~xint_t(bits_mask_);
} else {
return xint_t(storage);
}
}
/// Equality
CUTLASS_HOST_DEVICE
bool operator==(integer_subbyte const& rhs) const {
return storage == rhs.storage;
}
/// Inequality
CUTLASS_HOST_DEVICE
bool operator!=(integer_subbyte const& rhs) const {
return storage != rhs.storage;
}
/// Less than or equal
CUTLASS_HOST_DEVICE
bool operator<=(integer_subbyte const& rhs) const {
if (sign_mask_ & storage) {
return !(rhs.storage < storage);
} else {
return storage <= rhs.storage;
}
}
/// Less than
CUTLASS_HOST_DEVICE
bool operator<(integer_subbyte const& rhs) const {
if (sign_mask_ & storage) {
return !(rhs.storage <= storage);
} else {
return storage < rhs.storage;
}
}
/// Greater than or equal
CUTLASS_HOST_DEVICE
bool operator>=(integer_subbyte const& rhs) const {
return !(*this < rhs);
}
/// Greater than
CUTLASS_HOST_DEVICE
bool operator>(integer_subbyte const& rhs) const {
return !(*this <= rhs);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-bit Unsigned integer type
using uint1b_t = integer_subbyte<1, false>;
/// 2-bit Integer type
using int2b_t = integer_subbyte<2, true>;
/// 2-bit Unsigned integer type
using uint2b_t = integer_subbyte<2, false>;
/// 4-bit Integer type
using int4b_t = integer_subbyte<4, true>;
/// 4-bit Unsigned integer type
using uint4b_t = integer_subbyte<4, false>;
/// 1-bit binary type
using bin1_t = bool;
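// Usage sketch (illustrative, not part of the original header): construction truncates the
// source to the low bits via bits_mask_, and the conversion operator sign-extends for signed
// types. The concrete values below assume the usual little-endian storage of the source int.
CUTLASS_HOST_DEVICE
void integer_subbyte_example() {
  int4b_t  x(-3);             // storage holds the low 4 bits: 0b1101
  int      xi = int(x);       // sign-extended back to -3
  uint4b_t y(19u);            // truncated: 19 & 0xF == 3
  unsigned yu = unsigned(y);  // 3
  (void)xi;
  (void)yu;
}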
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int Bits, bool Signed>
struct sizeof_bits<integer_subbyte<Bits,Signed>> {
static constexpr int value = Bits;
};
/// Defines the size of an element in bits - specialized for bin1_t
template <>
struct sizeof_bits<bin1_t> {
static constexpr int value = 1;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace platform {
template <>
struct numeric_limits<cutlass::int4b_t> {
CUTLASS_HOST_DEVICE static
cutlass::int4b_t const lowest() noexcept { return int4b_t{-8};}
CUTLASS_HOST_DEVICE static
cutlass::int4b_t const max() noexcept { return int4b_t{7};}
CUTLASS_HOST_DEVICE static
cutlass::int4b_t const min() noexcept { return lowest();}
static constexpr bool is_integer = true;
static constexpr bool is_signed = true;
};
template <>
struct numeric_limits<cutlass::uint4b_t> {
CUTLASS_HOST_DEVICE static
cutlass::uint4b_t const lowest() noexcept { return uint4b_t{0};}
CUTLASS_HOST_DEVICE static
cutlass::uint4b_t const max() noexcept { return uint4b_t{15};}
CUTLASS_HOST_DEVICE static
cutlass::uint4b_t const min() noexcept { return lowest();}
static constexpr bool is_integer = true;
static constexpr bool is_signed = false;
};
template <>
struct numeric_limits<cutlass::uint1b_t> {
CUTLASS_HOST_DEVICE static
cutlass::uint1b_t const lowest() noexcept { return uint1b_t{0};}
CUTLASS_HOST_DEVICE static
cutlass::uint1b_t const max() noexcept { return uint1b_t{1};}
CUTLASS_HOST_DEVICE static
cutlass::uint1b_t const min() noexcept { return lowest();}
static constexpr bool is_integer = true;
static constexpr bool is_signed = false;
};
template <>
struct numeric_limits<cutlass::int2b_t> {
CUTLASS_HOST_DEVICE static
cutlass::int2b_t lowest() noexcept { return int2b_t{-2}; }
CUTLASS_HOST_DEVICE static
cutlass::int2b_t min() noexcept { return lowest(); }
CUTLASS_HOST_DEVICE static
cutlass::int2b_t max() noexcept { return int2b_t{1}; }
static constexpr bool is_integer = true;
static constexpr bool is_signed = true;
};
template <>
struct numeric_limits<cutlass::uint2b_t> {
CUTLASS_HOST_DEVICE static
cutlass::uint2b_t const lowest() noexcept { return uint2b_t{0}; }
CUTLASS_HOST_DEVICE static
cutlass::uint2b_t const min() noexcept { return lowest(); }
CUTLASS_HOST_DEVICE static
cutlass::uint2b_t const max() noexcept { return uint2b_t{3}; }
static constexpr bool is_integer = true;
static constexpr bool is_signed = false;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace platform
} // namespace cutlass
| include/cutlass/integer_subbyte.h/0 | {
"file_path": "include/cutlass/integer_subbyte.h",
"repo_id": "include",
"token_count": 2547
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Boost-like numeric conversion operator for CUTLASS numeric types
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cfenv>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/thread/unary_op.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Floating-point rounding style similar to Standard Library's formats but supporting
/// additional rounding options.
enum class FloatRoundStyle {
round_indeterminate, ///< rounding mode unknown
round_toward_zero, ///< round toward zero
round_to_nearest, ///< round to nearest even
round_to_nearest_satfinite, ///< round to nearest even, capping value to min and max of destination type
round_toward_infinity, ///< round toward infinity
round_toward_neg_infinity, ///< round toward negative infinity
round_half_ulp_truncate, ///< add 0.5ulp to integer representation then round toward zero
round_half_ulp_trunc_dntz ///< like round_half_ulp_truncate, except denorms are rounded *toward* zero
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
struct NumericConverter {
using result_type = T;
using source_type = S;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<result_type>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
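// Usage sketch (illustrative, not part of the original header): NumericConverter is a
// stateless functor, so it is default-constructed and applied per element. The double -> float
// pair below relies only on the primary template above (a static_cast).
CUTLASS_HOST_DEVICE
float numeric_converter_example(double x) {
  NumericConverter<float, double> converter;  // default round style: round_to_nearest
  return converter(x);
}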
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => int32_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__)
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
return __float2int_rn(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
return __float2int_rz(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#elif !defined(__CUDACC_RTC__)
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
return (result_type)std::nearbyint(s);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
return (result_type)std::nearbyint(s);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => int8_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__)
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rzi.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#elif !defined(__CUDACC_RTC__)
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for identical source and destination types (pass-through conversion)
template <typename T, FloatRoundStyle Round>
struct NumericConverter<T, T, Round> {
using result_type = T;
using source_type = T;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return s;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> cutlass::half_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= cutlass::half_t
template <FloatRoundStyle Round>
struct NumericConverter<float, cutlass::half_t, Round> {
using result_type = float;
using source_type = cutlass::half_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result = static_cast<float>(s);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Specialization for round-to-nearest
template <>
struct NumericConverter<cutlass::half_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = cutlass::half_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result = static_cast<cutlass::half_t>(s);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Specialization for round-toward-zero
template <>
struct NumericConverter<cutlass::half_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = cutlass::half_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
/// Round toward zero
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return cutlass::half_t(__float2half_rz(flt));
#else
    // software implementation truncates the mantissa (rounds toward zero)
unsigned const& s = reinterpret_cast<unsigned const &>(flt);
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int32_t exp = int32_t((s >> 23) & 0xff) - 127;
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return cutlass::half_t::bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return cutlass::half_t::bitcast(u);
}
if (exp >= -14) {
// normal fp32 to normal fp16
u = uint16_t((uint32_t(exp + 15) & 0x1f) << 10);
u = uint16_t(u | (mantissa >> 13));
} else {
      // normal single-precision to subnormal half-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
u |= sign;
return cutlass::half_t::bitcast(u);
#endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> cutlass::bfloat16_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= cutlass::bfloat16_t
template <FloatRoundStyle Round>
struct NumericConverter<float, cutlass::bfloat16_t, Round> {
using result_type = float;
using source_type = cutlass::bfloat16_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<float>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = cutlass::bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<cutlass::bfloat16_t>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_half_ulp_truncate> {
using result_type = cutlass::bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x32 = reinterpret_cast<uint32_t const &>(s);
#if defined(__CUDA_ARCH__)
if (::isfinite(s)) {
x32 += 0x8000;
}
#else
if (std::isfinite(s)) {
x32 += 0x8000;
}
#endif
uint16_t x16 = uint16_t((x32 >> 16) & 0xffff);
return cutlass::bfloat16_t::bitcast(x16);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
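// Worked illustration of the half-ulp-truncate trick above (not part of the original header):
// adding 0x8000 to the fp32 bit pattern carries into bit 16 exactly when the discarded low half
// is at least half a bfloat16 ulp, so truncating after the add rounds such cases up in magnitude.
//   0x3F808000 (1.0 + half ulp)      + 0x8000 -> 0x3F810000 -> bf16 bits 0x3F81 (1.0078125)
//   0x3F807FFF (just under half ulp) + 0x8000 -> 0x3F80FFFF -> bf16 bits 0x3F80 (1.0)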
template <>
struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = cutlass::bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x32 = reinterpret_cast<uint32_t const &>(s);
uint16_t x16 = uint16_t(x32 >> 16);
return cutlass::bfloat16_t::bitcast(x16);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> cutlass::tfloat32_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= cutlass::tfloat32_t
template <FloatRoundStyle Round>
struct NumericConverter<float, cutlass::tfloat32_t, Round> {
using result_type = float;
using source_type = cutlass::tfloat32_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<float>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
unsigned storage = reinterpret_cast<unsigned const &>(s);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
asm volatile("cvt.rn.tf32.f32 %0, %1;" : "=r"(storage) : "r"(storage));
#else
if ((storage & 0x7f800000) != 0x7f800000) {
bool mantissa_bit = ((storage & (1 << 13)) != 0);
bool round_bit = ((storage & (1 << 12)) != 0);
bool sticky_bit = ((storage & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) {
storage += uint32_t(1 << 13);
}
// Note, the following is intentionally commented out. TF32
// does not define the low order bits, so they may be left in
// an undefined state.
//
      // By not truncating these bits explicitly, we avoid an extra logical
// operation.
//
// TF32 may be implicitly converted to float by performing this
// operation as needed.
//
// storage = (storage & ~0x1fff);
}
else if (storage & ~0xff800000) {
storage = 0x7fffffff;
}
#endif
return cutlass::tfloat32_t::bitcast(storage);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_half_ulp_truncate> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return cutlass::tfloat32_t::round_half_ulp_truncate(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// This rounding operation is similar to half_ulp_truncate except it rounds denorms toward zero.
/// It avoids predicated code, though it requires a temporary register.
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_half_ulp_trunc_dntz> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_trunc_dntz;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
unsigned y = reinterpret_cast<unsigned const &>(s);
y = y & 0xff800000;
float d = reinterpret_cast<float const &>(y);
float z = d / float(1 << 11) + s;
return reinterpret_cast<result_type const &>(z);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
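// Mechanism note for the converter above (not part of the original header): `d` keeps only the
// sign and exponent of `s`, so d / 2^11 is half a tfloat32_t ulp carrying the sign of `s`.
// Adding it before the low mantissa bits are dropped when the result is consumed as tfloat32_t
// biases half-way cases away from zero. For denormal inputs the exponent field is zero, `d` is
// +/-0, nothing is added, and the value is simply truncated toward zero, which is the "dntz"
// (denorms toward zero) behavior.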
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x = reinterpret_cast<uint32_t const &>(s);
return cutlass::tfloat32_t::bitcast(x & 0xffffe000);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion operator for float to cutlass::tfloat32_t big and small values
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero,
FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate
>
struct NumericConverterFastF32 {
// result_type holds big cutlass::tfloat32_t at idx(0) and small cutlass::tfloat32_t at idx(1)
using result_type = Array<cutlass::tfloat32_t, 2>;
// source data type
using source_type = float;
// rounding styles for big and small part
static FloatRoundStyle const kRoundBig = RoundBig;
static FloatRoundStyle const kRoundSmall = RoundSmall;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<cutlass::tfloat32_t, float, kRoundBig> convert_big_;
NumericConverter<cutlass::tfloat32_t, float, kRoundSmall> convert_small_;
// convert and fill cutlass::tfloat32_t big at idx 0
result[0] = convert_big_(source);
// convert and fill cutlass::tfloat32_t small at idx 1
result[1] = convert_small_(source - static_cast<float>(result[0]));
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
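// Usage sketch (illustrative, not part of the original header): the big/small split is the
// building block of fast, accurate FP32 emulation on Tensor Cores; the sum of the two
// tfloat32_t pieces recovers most of the original fp32 mantissa.
CUTLASS_HOST_DEVICE
float fast_f32_split_example(float x) {
  NumericConverterFastF32<> converter;
  Array<tfloat32_t, 2> big_small = converter(x);
  return static_cast<float>(big_small[0]) + static_cast<float>(big_small[1]);  // approximately x
}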
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion and Clamp operator for Integers
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S
>
struct NumericConverterClamp {
using result_type = T;
using source_type = S;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
NumericConverter<result_type, source_type> convert_op;
result_type const kClamp_max = platform::numeric_limits<result_type>::max();
result_type const kClamp_min = platform::numeric_limits<result_type>::lowest();
if (s < (source_type)kClamp_min)
return kClamp_min;
if (s > (source_type)kClamp_max)
return kClamp_max;
return convert_op(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
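// Usage sketch (illustrative, not part of the original header), assuming
// platform::numeric_limits<int8_t> is available as it is for the integer output types used in
// CUTLASS epilogues: out-of-range values saturate instead of wrapping.
CUTLASS_HOST_DEVICE
int8_t clamp_convert_example(float x) {
  NumericConverterClamp<int8_t, float> converter;
  return converter(x);  // e.g. 300.0f -> 127, -1000.0f -> -128
}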
// This converter is needed to enable cutlass::half_t output types when using int32_t accumulators.
// Since floating-point types do not require a clamp, this converter simply casts from
// the source type to cutlass::half_t.
template <
typename S
>
struct NumericConverterClamp<cutlass::half_t, S> {
using result_type = cutlass::half_t;
using source_type = S;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const &source) {
return static_cast<cutlass::half_t>(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion operator for Array
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conversion operator for Array
template <
typename T,
typename S,
int N,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Transform = cutlass::transform::thread::UnaryTransform::Identity
>
struct NumericArrayConverter {
using result_type = Array<T, N>;
using source_type = Array<S, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result;
NumericConverter<T, S, Round> convert_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) {
result[i] = convert_(s[i]);
} else { // conjugate
result[i] = conj(convert_(s[i]));
}
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
typename T,
int N,
FloatRoundStyle Round,
typename Transform
>
struct NumericArrayConverter<T, T, N, Round, Transform> {
using result_type = Array<T, N>;
using source_type = Array<T, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const &source) {
if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) {
return source;
} else {
result_type result;
for (int i = 0; i < N; ++i) {
result[i] = conj(source[i]);
}
return result;
}
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half, 2> <= Array<float, 2>, round to nearest
template <>
struct NumericArrayConverter<cutlass::half_t, float, 2, FloatRoundStyle::round_to_nearest> {
using result_type = Array<cutlass::half_t, 2>;
using source_type = Array<float, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
Array<cutlass::half_t, 2> result;
reinterpret_cast<__half2 &>(result) = __float22half2_rn(reinterpret_cast<float2 const &>(source));
return result;
#else
NumericConverter<cutlass::half_t, float, round_style> convert_;
    // NOTE: cutlass::Array<half, N> is NOT an aggregate type, so the `{}` below does
    // NOT perform zero initialization; it performs default initialization (calling the
    // default constructor). We use this syntax to resolve a compiler warning about an
    // uninitialized member variable.
Array<cutlass::half_t, 2> result{};
result[0] = convert_(source[0]);
result[1] = convert_(source[1]);
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, 2> <= Array<cutlass::half_t, 2>, round to nearest
template <FloatRoundStyle Round>
struct NumericArrayConverter<float, cutlass::half_t, 2, Round> {
using result_type = Array<float, 2>;
using source_type = Array<cutlass::half_t, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
float2 result2 = __half22float2(reinterpret_cast<__half2 const &>(source));
return {
float{result2.x},
float{result2.y}
};
#else
NumericConverter<float, cutlass::half_t, round_style> convert_;
return {
convert_(source[0]),
convert_(source[1])
};
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<cutlass::half_t, float, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<cutlass::half_t, float, 2, Round> convert_vector_;
NumericConverter<cutlass::half_t, float, Round> convert_element_;
result_type result;
Array<cutlass::half_t, 2> *result_ptr = reinterpret_cast<Array<cutlass::half_t, 2> *>(&result);
Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
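// Usage sketch (illustrative, not part of the original header): fragments convert element-wise
// through a single functor call; the vectorized partial specializations above are selected
// automatically for the even-length half <-> float case.
CUTLASS_HOST_DEVICE
Array<half_t, 4> array_converter_example(Array<float, 4> const &in) {
  NumericArrayConverter<half_t, float, 4> converter;
  return converter(in);
}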
/// Partial specialization for Array<float> <= Array<cutlass::half_t>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float, cutlass::half_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<cutlass::half_t, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<float, cutlass::half_t, 2, Round> convert_vector_;
NumericConverter<float, cutlass::half_t, Round> convert_element_;
result_type result;
Array<float, 2> *result_ptr = reinterpret_cast<Array<float, 2> *>(&result);
Array<cutlass::half_t, 2> const *source_ptr = reinterpret_cast<Array<cutlass::half_t, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, 2> <= Array<float, 2>, round to nearest
template <>
struct NumericArrayConverter<cutlass::bfloat16_t, float, 2, FloatRoundStyle::round_to_nearest> {
using result_type = Array<cutlass::bfloat16_t, 2>;
using source_type = Array<float, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned d;
asm("cvt.rn.bf16x2.f32 %0, %1, %2;\n" : "=r"(d) : "f"(source[1]), "f"(source[0]) );
return reinterpret_cast<result_type const &>(d);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::bfloat16_t> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<cutlass::bfloat16_t, float, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<cutlass::bfloat16_t, float, 2, Round> convert_vector_;
NumericConverter<cutlass::bfloat16_t, float, Round> convert_element_;
result_type result;
Array<cutlass::bfloat16_t, 2> *result_ptr = reinterpret_cast<Array<cutlass::bfloat16_t, 2> *>(&result);
Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif // if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && \
((__CUDACC_VER_MAJOR__ > 10) || \
((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Partial specialization for Array<int8_t, 1> <= Array<int, 1>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 1, Round> {
using result_type = Array<int8_t, 1>;
using source_type = Array<int, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<int8_t, int, Round> convert_element_;
result_type result;
result[0] = convert_element_(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int8_t, 2> <= Array<int, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 2, Round> {
using result_type = Array<int8_t, 2>;
using source_type = Array<int, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
uint32_t tmp;
asm volatile(
"cvt.pack.sat.s8.s32.b32 %0, %2, %1, 0;\n"
: "=r"(tmp) : "r"(source[0]), "r"(source[1]));
uint16_t out = (tmp & 0xffff);
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int8_t, 4> <= Array<int, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 4, Round> {
using result_type = Array<int8_t, 4>;
using source_type = Array<int, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.s8.s32.b32 r4, %4, %3, 0;"
"cvt.pack.sat.s8.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
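// Behavior note (not part of the original header): each lane is converted with saturation, so
// values outside [-128, 127] clamp to the int8_t range, e.g. {300, -7, -200, 5} packs as
// {127, -7, -128, 5}.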
/// Partial specialization for Array<int8_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<int8_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<int8_t, int, 4, Round> convert_vector_;
result_type result;
Array<int8_t, 4> *result_ptr = reinterpret_cast<Array<int8_t, 4> *>(&result);
Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 1> <= Array<int, 1>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 1, Round> {
using result_type = Array<uint8_t, 1>;
using source_type = Array<int, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<uint8_t, int, Round> convert_element_;
result_type result;
result[0] = convert_element_(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 2> <= Array<int, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 2, Round> {
using result_type = Array<uint8_t, 2>;
using source_type = Array<int, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
uint32_t tmp;
asm volatile(
"cvt.pack.sat.u8.s32.b32 %0, %2, %1, 0;\n"
: "=r"(tmp) : "r"(source[0]), "r"(source[1]));
uint16_t out = (tmp & 0xffff);
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 4> <= Array<int, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 4, Round> {
using result_type = Array<uint8_t, 4>;
using source_type = Array<int, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.u8.s32.b32 r4, %4, %3, 0;"
"cvt.pack.sat.u8.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<uint8_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<uint8_t, int, 4, Round> convert_vector_;
result_type result;
Array<uint8_t, 4> *result_ptr = reinterpret_cast<Array<uint8_t, 4> *>(&result);
Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif
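//
// Illustrative usage sketch (not part of the library): the saturating int -> int8_t / uint8_t
// array converters above narrow 32-bit accumulators in packed groups of four via cvt.pack.sat.
// The kernel name below is hypothetical; the block is kept disabled for reference only.
//
#if 0 // Usage sketch (reference only)
__global__ void narrow_accumulators_example(Array<int, 8> const *in, Array<int8_t, 8> *out) {
  // For N = 8, the converter loops twice over the 4-wide cvt.pack.sat.s8 specialization.
  NumericArrayConverter<int8_t, int, 8, FloatRoundStyle::round_to_nearest> converter;
  out[threadIdx.x] = converter(in[threadIdx.x]);
}
#endif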
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float, N> <=> Array<float_e4m3_t, N>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float, 2> <= Array<float_e4m3_t, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float, cutlass::float_e4m3_t, 2, Round> {
using result_element = float;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 2>;
using source_type = Array<source_element, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16;
uint16_t const& src_packed = reinterpret_cast<uint16_t const&>(source);
asm volatile( \
"{\n" \
"cvt.rn.f16x2.e4m3x2 %0, %1;\n" \
"}\n" : "=r"(out_fp16): "h"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 2> <= Array<float, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float, 2, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = float;
using result_type = Array<result_element, 2>;
using source_type = Array<source_element, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t out;
asm volatile( \
"{\n" \
"cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;\n" \
"}" \
: "=h"(out) : "f"(source[0]), "f"(source[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, 2> <= Array<float_e5m2_t, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float, cutlass::float_e5m2_t, 2, Round> {
using result_element = float;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 2>;
using source_type = Array<source_element, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16;
uint16_t const& src_packed = reinterpret_cast<uint16_t const&>(source);
asm volatile( \
"{\n" \
"cvt.rn.f16x2.e5m2x2 %0, %1;\n" \
"}\n" : "=r"(out_fp16): "h"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
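//
// Illustrative usage sketch (not part of the library): round-tripping a pair of floats through
// e4m3. When CUDA_PTX_FP8_CVT_ENABLED is defined the converters above use the cvt.* FP8 PTX path;
// otherwise they fall back to scalar NumericConverter. The function name is hypothetical and the
// block is kept disabled for reference only.
//
#if 0 // Usage sketch (reference only)
CUTLASS_DEVICE
void fp8_roundtrip_example(Array<float, 2> const &src, Array<float, 2> &dst) {
  NumericArrayConverter<cutlass::float_e4m3_t, float, 2, FloatRoundStyle::round_to_nearest> to_e4m3;
  NumericArrayConverter<float, cutlass::float_e4m3_t, 2, FloatRoundStyle::round_to_nearest> to_f32;
  // Quantize to e4m3 (saturating to finite values), then dequantize back to float.
  dst = to_f32(to_e4m3(src));
}
#endif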
namespace detail {
/// Special converters that can be used with 4 8-bit elements packed in a register.
/// Common use is for fast FP8 converters.
template <
typename T,
typename S,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Transform = cutlass::transform::thread::UnaryTransform::Identity
>
struct NumericArrayConverterPacked4Element {
using result_type = Array<T, 4>;
using source_type = Array<S, 4>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result;
NumericConverter<T, S, Round> convert_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) {
result[i] = convert_(s[i]);
}
else { // conjugate
result[i] = conj(convert_(s[i]));
}
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float, cutlass::float_e4m3_t, Round> {
using result_element = float;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e4m3x2 %0, lo;\n" \
"cvt.rn.f16x2.e4m3x2 %1, hi;\n" \
"}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0]));
float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1]));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
out[2] = res1.x;
out[3] = res1.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, float, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = float;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e4m3x2.f32 lo, %2, %1;\n" \
"cvt.rn.satfinite.e4m3x2.f32 hi, %4, %3;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float, cutlass::float_e5m2_t, Round> {
using result_element = float;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e5m2x2 %0, lo;\n" \
"cvt.rn.f16x2.e5m2x2 %1, hi;\n" \
"}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0]));
float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1]));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
out[2] = res1.x;
out[3] = res1.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, float, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = float;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e5m2x2.f32 lo, %2, %1;\n" \
"cvt.rn.satfinite.e5m2x2.f32 hi, %4, %3;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::half_t, 4> <=> Array<float_e4m3_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::half_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::half_t, cutlass::float_e4m3_t, Round> {
using result_element = cutlass::half_t;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e4m3x2 %0, lo;\n" \
"cvt.rn.f16x2.e4m3x2 %1, hi;\n" \
"}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<cutlass::half_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::half_t, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = cutlass::half_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source);
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e4m3x2.f16x2 lo, %1;\n" \
"cvt.rn.satfinite.e4m3x2.f16x2 hi, %2;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::half_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::half_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::half_t, cutlass::float_e5m2_t, Round> {
using result_element = cutlass::half_t;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e5m2x2 %0, lo;\n" \
"cvt.rn.f16x2.e5m2x2 %1, hi;\n" \
"}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<cutlass::half_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::half_t, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = cutlass::half_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source);
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e5m2x2.f16x2 lo, %1;\n" \
"cvt.rn.satfinite.e5m2x2.f16x2 hi, %2;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::bfloat16_t, 4> <=> Array<float_e4m3_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::bfloat16_t, cutlass::float_e4m3_t, Round> {
using result_element = cutlass::bfloat16_t;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert f8 to float
NumericArrayConverterPacked4Element<float, source_element, Round> src2float;
Array<float, 4> tmp_floats = src2float(source);
// Convert float to bf16
result_type out;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats);
Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out);
NumericArrayConverter<result_element, float, 2, Round> float2result;
packed_out[0] = float2result(packed_tmp[0]);
packed_out[1] = float2result(packed_tmp[1]);
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<cutlass::bfloat16_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::bfloat16_t, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = cutlass::bfloat16_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert bf16 to float
Array<float, 4> tmp;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp);
Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source);
NumericArrayConverter<float, source_element, 2, Round> src2float;
packed_tmp[0] = src2float(packed_source[0]);
packed_tmp[1] = src2float(packed_source[1]);
// Convert float to f8
NumericArrayConverterPacked4Element<result_element, float, Round> float2result;
return float2result(tmp);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::bfloat16_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::bfloat16_t, cutlass::float_e5m2_t, Round> {
using result_element = cutlass::bfloat16_t;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert f8 to float
NumericArrayConverterPacked4Element<float, source_element, Round> src2float;
Array<float, 4> tmp_floats = src2float(source);
// Convert float to bf16
result_type out;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats);
Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out);
NumericArrayConverter<result_element, float, 2, Round> float2result;
packed_out[0] = float2result(packed_tmp[0]);
packed_out[1] = float2result(packed_tmp[1]);
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<cutlass::bfloat16_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::bfloat16_t, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = cutlass::bfloat16_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert bf16 to float
Array<float, 4> tmp;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp);
Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source);
NumericArrayConverter<float, source_element, 2, Round> src2float;
packed_tmp[0] = src2float(packed_source[0]);
packed_tmp[1] = src2float(packed_source[1]);
// Convert float to f8
NumericArrayConverterPacked4Element<result_element, float, Round> float2result;
return float2result(tmp);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float_e4m3_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::float_e5m2_t, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::float_e4m3_t, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for:
// Array<T, N> <=> Array<float_e4m3_t, N>
// Array<T, N> <=> Array<float_e5m2_t, N>
// using packed converter under the hood
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S,
int N,
FloatRoundStyle Round
>
struct PackedNumericArrayConverter {
using result_element = T;
using source_element = S;
using result_type = Array<result_element, N>;
using source_type = Array<source_element, N>;
static FloatRoundStyle const round_style = Round;
private:
using packed_result_type = Array<result_element, 4>;
using packed_source_type = Array<source_element, 4>;
public:
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
packed_result_type* packed_result = reinterpret_cast<packed_result_type*>(&result);
const packed_source_type* packed_source = reinterpret_cast<const packed_source_type*>(&source);
detail::NumericArrayConverterPacked4Element<result_element, source_element, Round> packed_converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
packed_result[i] = packed_converter(packed_source[i]);
}
// Handle leftovers
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N % 4; ++i) {
int idx = ((N / 4) * 4) + i;
result[idx] = converter(source[idx]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const{
return convert(s);
}
};
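//
// Illustrative sketch (not part of the library): for N = 6, PackedNumericArrayConverter::convert
// above performs one 4-wide NumericArrayConverterPacked4Element pass over elements [0, 3] and
// scalar conversions for the two leftover elements [4, 5]. The function name is hypothetical and
// the block is kept disabled for reference only.
//
#if 0 // Usage sketch (reference only)
CUTLASS_DEVICE
void fp8_dequant_example(Array<cutlass::float_e4m3_t, 6> const &src, Array<float, 6> &dst) {
  // Resolves to PackedNumericArrayConverter through the partial specializations below.
  NumericArrayConverter<float, cutlass::float_e4m3_t, 6, FloatRoundStyle::round_to_nearest> converter;
  dst = converter(src);
}
#endif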
/// Partial specialization for Array<T, N> <= Array<float_e4m3_t, N>
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<T, cutlass::float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<T, cutlass::float_e4m3_t, N, Round> {};
/// Partial specialization for Array<T, N> <= Array<float_e5m2_t, N>
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<T, cutlass::float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<T, cutlass::float_e5m2_t, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<S, N>
template <
typename S,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, S, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, S, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<S, N>
template <
typename S,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, S, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, S, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e5m2_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, cutlass::float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, cutlass::float_e5m2_t, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e4m3_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, cutlass::float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, cutlass::float_e4m3_t, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e4m3_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, cutlass::float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, cutlass::float_e4m3_t, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e5m2_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, cutlass::float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, cutlass::float_e5m2_t, N, Round> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<int8_t> <= Array<float>
/// Conversion is performed with saturation regardless of setting of
/// the `Round` template parameter.
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, 1, Round> {
using result_type = Array<int8_t, 1>;
using source_type = Array<float, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
// Convert float directly to int8_t
NumericConverter<int8_t, float, Round> destination_converter;
result_type result;
result[0] = destination_converter(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
// To convert FP32 to an integer type narrower than 32 bits, we first convert to int32_t and then narrow.
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayFP32ToIntConverter {
using result_type = Array<T, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::numeric_limits<T>::is_integer, "The destination type must be an integer type.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
// Convert float to int
Array<int32_t, N> temporary;
NumericArrayConverter<int32_t, float, N, Round> compute_converter;
temporary = compute_converter(source);
// Narrow int32_t to the destination integer type
NumericArrayConverter<T, int32_t, N, Round> destination_converter;
return destination_converter(temporary);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
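//
// Illustrative sketch (not part of the library): the helper above routes float -> narrow-int
// conversions through int32_t so the saturating packed int32 -> int8/int4 converters can be
// reused. The wrapper specializations below are the intended entry points; the function name is
// hypothetical and the block is kept disabled for reference only.
//
#if 0 // Usage sketch (reference only)
CUTLASS_DEVICE
void float_to_s8_example(Array<float, 8> const &src, Array<int8_t, 8> &dst) {
  // float -> int32_t (rounding), then int32_t -> int8_t (saturating pack).
  NumericArrayConverter<int8_t, float, 8, FloatRoundStyle::round_to_nearest> converter;
  dst = converter(src);
}
#endif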
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, N, Round> {
using result_type = Array<int8_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<int8_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, float, N, Round> {
using result_type = Array<uint8_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<uint8_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, float, N, Round> {
using result_type = Array<int4b_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<int4b_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, float, N, Round> {
using result_type = Array<uint4b_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<uint4b_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && \
((__CUDACC_VER_MAJOR__ > 10) || \
((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Partial specialization for Array<int4b_t, 8> <= Array<int, 8>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, 8, Round> {
using result_type = Array<int4b_t, 8>;
using source_type = Array<int, 8>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.s4.s32.b32 r4, %8, %7, 0;"
"cvt.pack.sat.s4.s32.b32 r4, %6, %5, r4;"
"cvt.pack.sat.s4.s32.b32 r4, %4, %3, r4;"
"cvt.pack.sat.s4.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out)
: "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
"r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int4b_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, N, Round> {
static_assert(!(N % 8), "N must be multiple of 8.");
using result_type = Array<int4b_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<int4b_t, int, 8, Round> convert_vector_;
result_type result;
Array<int4b_t, 8> *result_ptr = reinterpret_cast<Array<int4b_t, 8> *>(&result);
Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 8; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint4b_t, 8> <= Array<int, 8>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, 8, Round> {
using result_type = Array<uint4b_t, 8>;
using source_type = Array<int, 8>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.u4.s32.b32 r4, %8, %7, 0;"
"cvt.pack.sat.u4.s32.b32 r4, %6, %5, r4;"
"cvt.pack.sat.u4.s32.b32 r4, %4, %3, r4;"
"cvt.pack.sat.u4.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out)
: "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
"r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint4b_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, N, Round> {
static_assert(!(N % 8), "N must be multiple of 8.");
using result_type = Array<uint4b_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<uint4b_t, int, 8, Round> convert_vector_;
result_type result;
Array<uint4b_t, 8> *result_ptr = reinterpret_cast<Array<uint4b_t, 8> *>(&result);
Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 8; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif // Conditional guards to enable partial specialization for packed integers
namespace detail {
/*
A helper class that can vectorize a numeric converter with implementations for several vector widths.
The vector widths must be given in decreasing order of width, and each must be a power of 2.
The vector converters must produce identical results to the scalar converters for consistency.
*/
class VectorizedConverter {
private:
// Base case to handle remainder elements as scalars.
template <int Offset, size_t ParentWidth, typename ArrayConverter>
CUTLASS_DEVICE
static void convert_helper(
typename ArrayConverter::result_type& result,
typename ArrayConverter::source_type const& source) {
using ElementRes = typename ArrayConverter::result_type::Element;
using ElementSrc = typename ArrayConverter::source_type::Element;
// If no more converters, handle the remaining elements as scalars.
constexpr int total_elements = ArrayConverter::result_type::kElements;
constexpr int remainder = total_elements - Offset;
static_assert(remainder == (total_elements % ParentWidth), "Unexpected remainder.");
typename ArrayConverter::ScalarConverter scalar_converter;
CUTLASS_PRAGMA_UNROLL
for (int i = Offset; i < ArrayConverter::result_type::kElements; ++i) {
result[i] = scalar_converter(ElementSrc(source[i]));
}
}
template <int Offset, size_t ParentWidth, typename ArrayConverter, typename ResultVectorArray, typename SourceVectorArray, typename... OtherVectorArrays>
CUTLASS_DEVICE
static void convert_helper(typename ArrayConverter::result_type& result, typename ArrayConverter::source_type const& source) {
static_assert(sizeof...(OtherVectorArrays) % 2 == 0, "Vector converters must come in {dst, src} pairs");
static_assert(ResultVectorArray::kElements == SourceVectorArray::kElements, "Vector converters must have the same vector width");
static_assert(cutlass::platform::is_same<typename ArrayConverter::result_type::Element, typename ResultVectorArray::Element>::value,
"ResultVectorArray must have the same element type as ArrayConverter::result_type");
static_assert(cutlass::platform::is_same<typename ArrayConverter::source_type::Element, typename SourceVectorArray::Element>::value,
"SourceVectorArray must have the same element type as ArrayConverter::source_type");
static_assert(Offset >= 0 && Offset <= ArrayConverter::result_type::kElements, "Offset must be between 0 and N");
static_assert(ParentWidth == 0 || ParentWidth > ResultVectorArray::kElements, "Vector arrays must be given in decreasing order of width");
constexpr int vector_width = ResultVectorArray::kElements;
static_assert(ispow2(vector_width), "Vector width must be a power of 2");
using ElementRes = typename ArrayConverter::result_type::Element;
using ElementSrc = typename ArrayConverter::source_type::Element;
constexpr int vector_bits_res = vector_width * cutlass::sizeof_bits<ElementRes>::value;
constexpr int vector_bits_src = vector_width * cutlass::sizeof_bits<ElementSrc>::value;
static_assert(vector_bits_res % 8 == 0, "Result vector type must be byte addressed.");
static_assert(vector_bits_src % 8 == 0, "Source vector type must be byte addressed.");
constexpr int vector_offset = Offset / vector_width;
ResultVectorArray* packed_result_vec = reinterpret_cast<ResultVectorArray*>(&result) + vector_offset;
SourceVectorArray const* packed_source_vec = reinterpret_cast<SourceVectorArray const*>(&source) + vector_offset;
// Convert the remaining elements as vectors.
constexpr int total_elements = ArrayConverter::result_type::kElements;
constexpr int groups_of_vec = (total_elements - Offset) / vector_width;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < groups_of_vec; ++i) {
packed_result_vec[i] = ArrayConverter::template packed_convert<ResultVectorArray, SourceVectorArray>(packed_source_vec[i]);
}
constexpr int new_offset = Offset + vector_width * groups_of_vec;
// Recurse to handle other vector converters, or the scalar base case.
convert_helper<new_offset, ResultVectorArray::kElements, ArrayConverter, OtherVectorArrays...>(result, source);
}
public:
/*
A method to convert vectors of elements using the packed_convert method of the converter.
Converters using this class must implement packed convert and support 1 or more vector conversions.
*/
template <typename ArrayConverter, typename ResultVectorArray, typename SourceVectorArray, typename... OtherVectorArrays>
CUTLASS_DEVICE
static void convert(typename ArrayConverter::result_type& result, typename ArrayConverter::source_type const& source) {
convert_helper<0, 0, ArrayConverter, ResultVectorArray, SourceVectorArray, OtherVectorArrays...>(result, source);
}
};
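// Worked example (illustrative only): for a converter registered with vector widths {8, 4, 2}
// and N = 14, convert() above performs one 8-wide conversion over elements [0, 7], one 4-wide
// conversion over [8, 11], one 2-wide conversion over [12, 13], and no scalar remainder; any
// elements left over after the narrowest registered width fall back to ScalarConverter.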
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::float_e4m3_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::float_e4m3_t, cutlass::int4b_t, N, Round> {
using result_type = Array<cutlass::float_e4m3_t, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<cutlass::float_e4m3_t, 8>;
using result_type_packed_4 = Array<cutlass::float_e4m3_t, 4>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using ScalarConverter = NumericConverter<cutlass::float_e4m3_t, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses a lookup table to convert i4 -> e4m3.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 4 or 8 to use private convert dispatch.");
// Hold FP8 outputs in reg. We need 1 reg for every 4 outputs.
cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 4, sizeof(PackedResultType)> r;
// View the input as reg
uint32_t reg = to_reg(source);
// Determines whether to select from the positive or negative candidates
uint32_t sign = (reg & 0x88888888) >> 1;
// Ignore sign bit when indexing into LUT
uint32_t lut_idx = (reg & 0x77777777);
// Signed is OR'd with 0x32103210 to find the correct value in the LUT
const uint32_t final_prmt_base = 0x32103210;
// [0, 1, 2, 3] encoded as FP8
static constexpr uint32_t POS_E4M3s_REG1 = 0x44403800;
// [4, 5, 6, 7] encoded as FP8
static constexpr uint32_t POS_E4M3s_REG2 = 0x4E4C4A48;
// [-8, -7, -6, -5] encoded as FP8
static constexpr uint32_t NEG_E4M3s_REG1 = 0xCACCCED0;
// [-4, -3, -2, -1] encoded as FP8
static constexpr uint32_t NEG_E4M3s_REG2 = 0xB8C0C4C8;
const int iters = PackedSrcType::kElements / 4;
#pragma unroll
for (int ii = 0; ii < iters; ++ii, lut_idx >>=16, sign >>=16) {
uint32_t final_prmt_idx = final_prmt_base | sign;
// This uses a look up table to convert packed int4s to packed fp8s, using the int4 value
// as the index to prmt.
// It first select both the positive and negative candidates, then uses the sign bit to
// select the correct candidate.
asm volatile(
"{\n"
" .reg .b32 pos_f8s, neg_f8s;\n"
" prmt.b32 pos_f8s, %1, %2, %5;\n"
" prmt.b32 neg_f8s, %3, %4, %5;\n"
" prmt.b32 %0, pos_f8s, neg_f8s, %6;\n"
"}\n"
: "=r"(r[ii])
: "n"(POS_E4M3s_REG1), "n"(POS_E4M3s_REG2), "n"(NEG_E4M3s_REG1), "n"(NEG_E4M3s_REG2),
"r"(lut_idx), "r"(final_prmt_idx));
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
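// Worked example (illustrative only) for a single nibble of the i4 -> e4m3 lookup above: the
// int4 value -3 is stored as 0xD, so its nibble contributes 0x4 to `sign` (0x8 >> 1) and 0x5 to
// `lut_idx` (0xD & 0x7). The first two prmt instructions gather candidate byte 5, which for the
// negative table {NEG_E4M3s_REG1, NEG_E4M3s_REG2} is 0xC4, i.e. -3 in e4m3; the final prmt uses
// the sign nibble (base digit | 4) to pick the negative candidate over the positive one.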
/// Partial specialization for Array<float, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, cutlass::int4b_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<float, 8>;
using result_type_packed_4 = Array<float, 4>;
using result_type_packed_2 = Array<float, 2>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using source_type_packed_2 = Array<cutlass::int4b_t, 2>;
using ScalarConverter = NumericConverter<float, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint8_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <int offset, int elements_to_convert, typename PackedResultType>
CUTLASS_DEVICE
static void packed_convert_vec(PackedResultType& result, uint32_t src_reg) {
static_assert(offset == 0 || offset == 4, "Invalid offset");
// Selects one of the bottom int4s and constructs:
// 8388608 + (x + 8)
// 8388608 + 16 * (x + 8)
// 8388608 + 256 * (x + 8)
// 8388608 + 4096 * (x + 8)
uint32_t const and_masks[4] = {0x0000000F, 0x000000F0, 0x00000F00, 0x0000F000};
uint32_t const xor_masks[4] = {0x4B000008, 0x4B000080, 0x4B000800, 0x4B008000};
float const scales[4] = {1.f, 1.f / 16.f, 1.f / 256.f, 1.f / 4096.f};
float const offsets[4] = {-8388616.f, -524296.f, -32776.f, -2056.f};
static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;
uint32_t* result_as_int = reinterpret_cast<uint32_t*>(&result);
// For each operand, computes:
// r[i] = (r[i] & and_mask) ^ xor_mask
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < elements_to_convert; ++ii) {
asm volatile(
"{\n"
" lop3.b32 %0, %1, %2, %3, %4;\n"
"}\n"
: "=r"(result_as_int[offset + ii])
: "r"(src_reg), "r"(and_masks[ii]), "r"(xor_masks[ii]), "n"(immLut));
result[offset + ii] = __fmaf_rn(result[offset + ii], scales[ii], offsets[ii]);
}
}
// The core converter uses bit tricks to construct a known FP32 number, then uses an FP32 fma
// to rescale and shift it back to the original integer value.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 1, 2, 4 or 8 to use private convert dispatch.");
// Hold the converted FP32s directly in the packed result array
PackedResultType r;
// View the input as reg
uint32_t src_reg = to_reg(source);
constexpr int total_elements = PackedResultType::kElements == 8 ? 4 : PackedResultType::kElements;
packed_convert_vec<0, total_elements>(r, src_reg);
if (PackedResultType::kElements == 8) {
uint32_t src_reg_shifted = src_reg >> 16;
packed_convert_vec<4, 4>(r, src_reg_shifted);
}
return r;
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
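// Worked example (illustrative only) for the lowest nibble of the i4 -> float trick above: the
// int4 value -3 is stored as 0xD. lop3 computes (src & 0x0000000F) ^ 0x4B000008 = 0x4B000005,
// whose float value is 8388608 + (x + 8) = 8388613.0; the fma with scale 1.0 and offset
// -8388616.0 then recovers -3.0. Higher nibbles use the wider AND masks together with the
// 1/16, 1/256 and 1/4096 scales to undo their positional shift.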
/// Partial specialization for Array<float, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, int8_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<int8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<float, 4>;
using result_type_packed_2 = Array<float, 2>;
using source_type_packed_4 = Array<int8_t, 4>;
using source_type_packed_2 = Array<int8_t, 2>;
using ScalarConverter = NumericConverter<float, int8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
PackedResultType r;
// View the input as reg
uint32_t src_reg = to_reg(source);
static constexpr int fp32_base = 0x4B400000;
uint32_t const prmt_indices[4] = {0x8880, 0x9991, 0xAAA2, 0xBBB3};
int* result_as_int = reinterpret_cast<int*>(&r);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < PackedResultType::kElements; ++ii) {
asm volatile("prmt.b32 %0,%1,%1,%2;\n" : "=r"(result_as_int[ii]) : "r"(src_reg), "r"(prmt_indices[ii]));
}
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < PackedResultType::kElements; ++ii)
{
result_as_int[ii] += fp32_base;
r[ii] -= reinterpret_cast<const float&>(fp32_base);
}
return r;
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, N> <= Array<uint8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, uint8_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<uint8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<float, 4>;
using result_type_packed_2 = Array<float, 2>;
using source_type_packed_4 = Array<uint8_t, 4>;
using source_type_packed_2 = Array<uint8_t, 2>;
using ScalarConverter = NumericConverter<float, uint8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
PackedResultType r;
// View the input as reg
uint32_t src_reg = to_reg(source);
// __byte_perm inserts each u8 element of the u8x4 source into the low byte of 0x4B000000, which
// is equivalent to add.u32 0x4B000000 on every element (without introducing an extra cvt.u32.u8 instruction)
uint32_t const prmt_indices[4] = {0x7650, 0x7651, 0x7652, 0x7653};
uint32_t* result_as_int = reinterpret_cast<uint32_t*>(&r);
for (int ii = 0; ii < PackedResultType::kElements; ++ii) {
result_as_int[ii] = __byte_perm(src_reg, 0x4B000000, prmt_indices[ii]);
// Subtract the FP32 magic number (0x4B000000 == 8388608.f) in floating-point arithmetic to obtain the final result
r[ii] -= 8388608.f;
}
return r;
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
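// Worked example (illustrative only) for the lowest byte of the u8 -> float trick above: the
// uint8 value 5 combined with 0x4B000000 via __byte_perm yields the bit pattern 0x4B000005,
// i.e. the float 8388613.0; subtracting 8388608.0 recovers 5.0.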
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::half_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::half_t, cutlass::int4b_t, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<cutlass::half_t, 8>;
using result_type_packed_4 = Array<cutlass::half_t, 4>;
using result_type_packed_2 = Array<cutlass::half_t, 2>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using source_type_packed_2 = Array<cutlass::int4b_t, 2>;
using ScalarConverter = NumericConverter<cutlass::half_t, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint8_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses bit tricks to construct a known FP16 number, then does a
// subtraction in FP16 for the final result.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
// View the input as reg
uint32_t src_reg = to_reg(source);
// Below constructs the following temporary:
// fp16s_01 = {0x00, i4_01, 0x00, i4_01}
// fp16s_23 = {0x00, i4_23, 0x00, i4_23}
// fp16s_45 = {0x00, i4_45, 0x00, i4_45}
// fp16s_67 = {0x00, i4_67, 0x00, i4_67}
// We use inline asm instead of __byte_perm intrinsic since we don't want the documented (& 0x7) on the index. NVCC
// might be able to optimize it out since the index is a constexpr, but we choose to be safe about it here.
uint32_t prmt_indices[4] = {0x4040, 0x4141, 0x4242, 0x4343};
static_assert(RegArray::kElements <= 4, "Too many inputs for I4 -> F16 vector converter");
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" prmt.b32 %0, %1, %2, %3;\n"
"}\n"
: "=r"(r[ii])
: "r"(src_reg), "n"(0), "r"(prmt_indices[ii]));
}
// The below XOR does the following:
// 1) Sets the exponent bits of the FP16 to the correct value for the FP16 magic_num. We will be constructing
// 1024 + x + 8 OR 1024 + 16 * (x + 8), then using hfma to subtract 1032 from that
// 2) Adds 8 to the int4 value that we will process in the FP16 (for uint4, we can simply avoid this step)
// The AND does the following:
// 1) Clear the set bits for the int4 we will ignore.
// We use lop3 so that we can use 1 instruction for AND and XOR.
static constexpr uint32_t xor_mask = 0x64806408;
static constexpr uint32_t and_mask = 0xFFF0FF0F;
static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;
// For each operand, computes:
// r[i] = (r[i] & and_mask) ^ xor_mask
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" lop3.b32 %0, %0, %1, %2, %3;\n"
"}\n"
: "+r"(r[ii])
: "n"(and_mask), "n"(xor_mask), "n"(immLut));
}
// We will issue 2 hfmas that do the following:
// For the high FP16:
// Divide by 16 {packed as a operand} to get:
// 64 + (x + 8)
// x + 72
// Subtract 72 {packed as c operand} to get x
// For the low FP16:
// 1024 + (x + 8)
// x + 1032
// So, we subtract 1032 {packed as c operand} to get x
// {-72, -1032}
static constexpr uint32_t hfma_bias_rep = 0xD480E408;
// {1 / 16, 1}
static constexpr uint32_t hfma_scale_rep = 0x2C003C00;
const half2& hfma_bias = reinterpret_cast<const half2&>(hfma_bias_rep);
const half2& hfma_scale = reinterpret_cast<const half2&>(hfma_scale_rep);
// Scale and subtract the FP16s to get the original int4 number as FP16.
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]);
fp16x2_val = __hfma2(hfma_scale, fp16x2_val, hfma_bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::half_t, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::half_t, int8_t, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<int8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::half_t, 4>;
using result_type_packed_2 = Array<cutlass::half_t, 2>;
using source_type_packed_4 = Array<int8_t, 4>;
using source_type_packed_2 = Array<int8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::half_t, int8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses bit tricks to construct a known FP16 number, then does a
// subtraction in FP16 for the final result.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
#if 0 // Scalar conversion (Please keep this code for reference for vectorized version below)
auto result = reinterpret_cast<PackedResultType&>(r);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < PackedResultType::kElements; ++i) {
int16_t tmp = source[i] + 26112 /* 0x6600 */;
result[i] = reinterpret_cast<cutlass::half_t const &>(tmp) - 1536.0_hf;
}
#endif
// View the input as reg
uint32_t src_reg = to_reg(source);
uint32_t const prmt_indices[2] = {0x9180, 0xB3A2};
// Pack s8x2 (s8[1], s8[0]) -> s16x2 (sext.s8[1], sext.s8[0])
// (See https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt)
    // The inline PTX below uses the `msb=0` / `msb=1` replication modes from the above link to sign-extend
    // bytes 0, 1, 2, 3 of the s8x4 input into bits 8-15 and 24-31 of r[0] and r[1], respectively.
    // Note that `__byte_perm(source_ptr[0], source_ptr[0], 0x9180);` won't achieve the same result, since the
    // intrinsic masks each selector nibble with 0x7 and therefore drops the sign-replication bit.
    // Thus, we use the inline PTX `prmt.b32` instruction for the desired sign extension from s8x2 to s16x2.
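    // Worked example (assuming src_reg = 0x04FB01FE, i.e. s8 bytes {-2, 1, -5, 4}):
    //   prmt with 0x9180 selects {b0, sign(b0), b1, sign(b1)} -> r[0] = 0x0001FFFE = s16x2 {low = -2, high = 1}
    //   prmt with 0xB3A2 selects {b2, sign(b2), b3, sign(b3)} -> r[1] = 0x0004FFFB = s16x2 {low = -5, high = 4}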
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile("prmt.b32 %0,%1,%1,%2;\n" : "=r"(r[ii]) : "r"(src_reg), "r"(prmt_indices[ii]));
}
    // In the absence of an add.s16x2 instruction, use a bit-wise operation with magic numbers to achieve
    // the same result as a packed signed 16-bit addition.
// (See https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#logic-and-shift-instructions-lop3)
// For a logical operation F(a, b, c) the value of kImmLut can be computed by applying the same operation to
// three predefined constant values as follows:
// ta = 0xF0;
// tb = 0xCC;
// tc = 0xAA;
// kImmLut = F(ta, tb, tc);
// If we want F = ((a & b) ^ c) then set kImmLut = (0xF0 & 0xCC) ^ 0xAA
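    // Here that evaluates to kImmLut = 0xC0 ^ 0xAA = 0x6A.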
static constexpr uint32_t kImmLut = (0xF0 & 0xCC) ^ 0xAA;
for (int ii = 0; ii < RegArray::kElements; ++ii) {
// The bit-wise operation executed below is `r[ii] = (r[ii] & 0x03FF03FF) ^ 0x66006600;`
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" :
"=r"(r[ii]) : "r"(r[ii]), "n"(0x03FF03FF), "n"(0x66006600), "n"(kImmLut));
}
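    // Worked example: if a 16-bit lane of r[ii] holds the sign-extended value -5 (0xFFFB), then
    // (0xFFFB & 0x03FF) ^ 0x6600 = 0x65FB, which is FP16(1531.0) = FP16(1536 - 5);
    // the __hsub2 below subtracts 1536 to recover -5.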
static constexpr uint32_t bias_rep = 0x66006600;
const half2& bias = reinterpret_cast<const half2&>(bias_rep);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]);
fp16x2_val = __hsub2(fp16x2_val, bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::half_t, N> <= Array<uint8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::half_t, uint8_t, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<uint8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::half_t, 4>;
using result_type_packed_2 = Array<cutlass::half_t, 2>;
using source_type_packed_4 = Array<uint8_t, 4>;
using source_type_packed_2 = Array<uint8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::half_t, uint8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
// View the input as reg
uint32_t src_reg = to_reg(source);
uint32_t const prmt_indices[2] = {0x5150, 0x5352};
static constexpr uint32_t start_byte_for_fp16 = 0x64646464;
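    // Each prmt below pairs one source byte with the byte 0x64, producing the FP16 bit pattern
    // 0x64XX = FP16(1024 + u8) in each 16-bit lane (e.g. u8 = 250 gives 0x64FA = 1274.0);
    // the __hsub2 below then subtracts 1024 to recover the original value.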
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile("prmt.b32 %0,%1,%2,%3;\n" : "=r"(r[ii]) : "r"(src_reg), "n"(start_byte_for_fp16), "r"(prmt_indices[ii]));
}
static constexpr uint32_t bias_rep = 0x64006400;
const half2& bias = reinterpret_cast<const half2&>(bias_rep);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]);
fp16x2_val = __hsub2(fp16x2_val, bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::bfloat16_t, cutlass::int4b_t, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<cutlass::bfloat16_t, 8>;
using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>;
using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using source_type_packed_2 = Array<cutlass::int4b_t, 2>;
using ScalarConverter = NumericConverter<cutlass::bfloat16_t, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint8_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses bit tricks to construct a known BF16 number, then does a
// subtraction in BF16 for the final result.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
// View the input as reg
uint32_t src_reg = to_reg(source);
uint32_t src_reg_shifted = src_reg >> 4;
    // Below constructs a temporary in which each 16-bit half of r[ii] holds one int4 value in its low nibble
    // (i4_0/i4_1 for r[0], i4_2/i4_3 for r[1], ...); the remaining bits are don't-care and are cleared by the AND below.
uint32_t const prmt_indices[4] = {0xF4F0, 0xF5F1, 0xF6F2, 0xF7F3};
static_assert(RegArray::kElements <= 4, "Too many inputs for I4 -> BF16 vector converter");
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" prmt.b32 %0, %1, %2, %3;\n"
"}\n"
: "=r"(r[ii])
: "r"(src_reg), "r"(src_reg_shifted), "r"(prmt_indices[ii]));
}
    // The below XOR does the following:
    // 1) Sets the exponent bits of the BF16 to the correct value for the BF16 magic_num. We will be constructing
    //    128 + (x + 8) and then subtracting 136 to get x
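    // Worked example (assuming x = -8, nibble 0x8): (0x8 & 0xF) ^ 0x4308 = 0x4300 = BF16(128.0);
    // 128 - 136 = -8. For x = 7 (nibble 0x7): 0x430F = BF16(143.0); 143 - 136 = 7.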
static constexpr uint32_t xor_mask = 0x43084308;
static constexpr uint32_t and_mask = 0x000F000F;
static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;
// For each operand, computes:
// r[i] = (r[i] & and_mask) ^ xor_mask
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" lop3.b32 %0, %0, %1, %2, %3;\n"
"}\n"
: "+r"(r[ii])
: "n"(and_mask), "n"(xor_mask), "n"(immLut));
}
    // We then issue one packed BF16 subtraction per register:
    //   each 16-bit lane currently holds (x + 136), so subtracting 136 from both lanes
    //   (hi_bf16 - 136, lo_bf16 - 136) recovers x.
    // Below is the BF16 pair {136, 136} represented as an integer.
static constexpr uint32_t bias_rep = 0x43084308;
const __nv_bfloat162& bias = reinterpret_cast<const __nv_bfloat162&>(bias_rep);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
__nv_bfloat162& bf16x2_val = reinterpret_cast<__nv_bfloat162&>(r[ii]);
bf16x2_val = __hsub2(bf16x2_val, bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::bfloat16_t, int8_t, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<int8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>;
using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>;
using source_type_packed_4 = Array<int8_t, 4>;
using source_type_packed_2 = Array<int8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::bfloat16_t, int8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
NumericArrayConverter<float, int8_t, PackedResultType::kElements, Round> convert_int8_to_f32;
Array<float, PackedResultType::kElements> tmp = convert_int8_to_f32(source);
NumericArrayConverter<cutlass::bfloat16_t, float, PackedResultType::kElements, Round> convert_f32_to_bf16;
return convert_f32_to_bf16(tmp);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<uint8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::bfloat16_t, uint8_t, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<uint8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>;
using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>;
using source_type_packed_4 = Array<uint8_t, 4>;
using source_type_packed_2 = Array<uint8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::bfloat16_t, uint8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
NumericArrayConverter<float, uint8_t, PackedResultType::kElements, Round> convert_uint8_to_f32;
Array<float, PackedResultType::kElements> tmp = convert_uint8_to_f32(source);
NumericArrayConverter<cutlass::bfloat16_t, float, PackedResultType::kElements, Round> convert_f32_to_bf16_;
return convert_f32_to_bf16_(tmp);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// FastNumericArrayConverter only works when the source values lie within a limited central range
/// (small enough in magnitude for the bit tricks below to be exact).
/// Conversion operator for Array. See the comments before
/// FastLinearCombinationClamp.
template <typename T, typename S, int N,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Enable = void>
struct FastNumericArrayConverter {
using result_type = Array<T, N>;
using source_type = Array<S, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &s) {
NumericArrayConverter<T, S, N, Round> convert_;
return convert_(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/// Partial specialization for Array<float> <= Array<int>
template <int N, FloatRoundStyle Round>
struct FastNumericArrayConverter<float, int, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
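    // Bit trick: 0x4B400000 is the float 12582912.0f (1.5 * 2^23), where one mantissa ulp equals 1.0.
    // Adding a small integer x to that bit pattern (as an int) therefore yields the float (12582912 + x) exactly,
    // e.g. x = -7 gives bits 0x4B3FFFF9 = 12582905.0f, and subtracting 12582912.0f recovers x.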
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int tmp = source[i] + 1262485504 /*0x4B400000*/;
result[i] = reinterpret_cast<float const &>(tmp) - 12582912.0f;
}
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/// Partial specialization for Array<int8_t, 4> <= Array<float, 4>
template <FloatRoundStyle Round>
struct FastNumericArrayConverter<int8_t, float, 4, Round> {
using result_type = Array<int8_t, 4>;
using source_type = Array<float, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
Array<int32_t, 4> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
float tmp = source[i] + 12582912.0f;
result[i] = reinterpret_cast<int32_t const &>(tmp);
}
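    // After the float add, the low byte of each result's bit pattern holds the rounded int8 value
    // in two's complement; the __byte_perm calls below gather the four low bytes into one 32-bit word.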
result[0] = __byte_perm(result[0], result[1], 0x40);
result[2] = __byte_perm(result[2], result[3], 0x40);
result[0] = __byte_perm(result[0], result[2], 0x5410);
return reinterpret_cast<result_type const &>(result[0]);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/// Partial specialization for Array<int8_t> <= Array<float>
template <int N, FloatRoundStyle Round>
struct FastNumericArrayConverter<int8_t, float, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<int8_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
FastNumericArrayConverter<int8_t, float, 4, Round> convert_vector_;
result_type result;
Array<int8_t, 4> *result_ptr =
reinterpret_cast<Array<int8_t, 4> *>(&result);
Array<float, 4> const *source_ptr =
reinterpret_cast<Array<float, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines preferred rounding mode for a pair of types
template <typename T, typename S>
struct PreferredRoundingMode {
static FloatRoundStyle const kRound = FloatRoundStyle::round_to_nearest;
};
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 900
/// Defines preferred rounding mode for a pair of types
template <>
struct PreferredRoundingMode<cutlass::tfloat32_t, float> {
static FloatRoundStyle const kRound = FloatRoundStyle::round_half_ulp_truncate;
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Packs predicates into an array.
template <int N>
struct PackPredicates {
using result_type = Array<uint1b_t, N>;
static_assert(!(N % 4), "Must pack predicates in a count that is a multiple of 4");
CUTLASS_HOST_DEVICE
result_type operator()(bool const predicates[]) {
result_type packed;
packed.clear();
int const kWordSize = 8;
uint8_t *bytes = reinterpret_cast<uint8_t *>(packed.data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int word_idx = (i / kWordSize);
int bit_idx = (i % kWordSize);
uint8_t mask = static_cast<uint8_t>((predicates[i] ? 1u : 0u) << bit_idx);
bytes[word_idx] = (bytes[word_idx] | mask);
}
return packed;
}
};
/// Unpacks predicates from an array
template <int N>
struct UnpackPredicates {
using result_type = Array<uint1b_t, N>;
static_assert(!(N % 4), "Must unpack predicates in a count that is a multiple of 4");
CUTLASS_HOST_DEVICE
void operator()(bool predicates[], result_type const &packed) {
int const kWordSize = 8;
uint8_t const *bytes = reinterpret_cast<uint8_t const *>(packed.data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int word_idx = (i / kWordSize);
int bit_idx = (i % kWordSize);
predicates[i] = bool((bytes[word_idx] >> bit_idx) & 0x1);
}
}
};
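/// For illustration, a round trip with N = 8 (hypothetical usage):
///   bool preds[8] = {true, false, true, true, false, false, true, false};
///   Array<uint1b_t, 8> packed = PackPredicates<8>{}(preds);   // low byte == 0b01001101
///   bool out[8];
///   UnpackPredicates<8>{}(out, packed);                       // out matches preds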
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/numeric_conversion.h/0 | {
"file_path": "include/cutlass/numeric_conversion.h",
"repo_id": "include",
"token_count": 48702
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/thread/reduction_operators.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (i.e. number of outer ranks)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineContiguousParams {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
Coord<kRank> extent; /// Extent of source tensor
FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank
int64_t dst_stride[kReducedRank]; /// stride (units of bytes) - I, J
int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K
int64_t workspace_stride; /// stride (units of bytes) between workspace
int workspace_count; /// number of workspaces
uint64_t inner_count; /// Number of elements in reduced index space
uint64_t outer_count; /// Number of elements in outer index space
ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank
ElementSource const * source; /// Pointer to source pointer of rank kRank
ReductionOp reduction_op; /// Reduction operator
ElementCompute reduction_identity; /// Identity element used by reduction operator
ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorReductionAffineContiguousParams() {
}
/// Ctor
TensorReductionAffineContiguousParams(
Coord<kRank> extent_, ///< Extent of source tensor
ElementOutput * dst_ptr_, ///< Output tensor data
int64_t dst_stride_[], ///< Stride (units of elements)
ElementSource const * src_ptr_, ///< Source tensor data
int64_t src_stride_[], ///< Stride (units of elements)
ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions
int64_t workspace_stride_, ///< Stride between workspaces
int workspace_count_, ///< Number of workspaces
ReductionOp reduction_op_, ///< Reduction operator
ElementCompute reduction_identity_ = ElementCompute() ///< Identity element used by reduction operator
):
extent(extent_),
inner_count(1),
outer_count(1),
destination(dst_ptr_),
source(src_ptr_),
device_workspace(device_workspace_),
workspace_stride(workspace_stride_),
workspace_count(workspace_count_),
reduction_op(reduction_op_),
reduction_identity(reduction_identity_) {
// Initialize divisors for fast div-mod
for (int p = 1; p < kRank; ++p) {
divmod[p - 1] = FastDivmodU64(uint64_t(extent[p]));
}
int input_size_bits = sizeof_bits<ElementSource>::value;
int output_size_bits = sizeof_bits<ElementOutput>::value;
// Compute strides in units of bytes
for (int p = 0; p < kReducedRank; ++p) {
dst_stride[p] = dst_stride_[p] * output_size_bits / 8;
}
for (int p = 0; p < kRank - 1; ++p) {
src_stride[p] = src_stride_[p] * input_size_bits / 8;
}
// Compute number of elements in strided ranks
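    // e.g. with extent = (N, H, W, C) = (2, 5, 5, 8) and kReducedRank = 1,
    // outer_count = 2 and inner_count = 5 * 5 * 8 = 200.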
for (int p = 0; p < kReducedRank; ++p) {
outer_count *= uint64_t(extent[p]);
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= uint64_t(extent[kRank - 1 - p]);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to reduce a tensor with affine layout over a set of ranks *INCLUDING* the contiguous
/// rank. This leads to favorable vectorized memory accesses over the contiguous rank.
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineContiguous {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory allocation used for reduction within the CTA
struct SharedStorage {
Array<ElementCompute, kThreads * kVectorLength> workspace;
};
/// Parameters structure
using Params = TensorReductionAffineContiguousParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_inner_coord_and_offset_(
Params const ¶ms,
Coord<kInnerRank> & coord,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into a coordinate of rank <kInnerRank>
coord = CoordinateDecomposition<kInnerRank>(linear_idx, ¶ms.divmod[kRank - kInnerRank]);
    // Compute an offset using the source stride
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kInnerRank - 1; ++i) {
src_offset += coord[i] * params.src_stride[kReducedRank + i];
}
src_offset += coord[kInnerRank - 1] * sizeof_bits<ElementSource>::value / 8;
}
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank> & coord,
int64_t &dst_offset,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into coordinate of rank <kReducedRank>
coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod);
// Compute offsets using destination and source strides
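    // (e.g. for a rank-4 NHWC source with kReducedRank = 1, coord is just (n),
    //  dst_offset = n * dst_stride[0], and src_offset = n * src_stride[0])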
dst_offset = 0;
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
src_offset += params.src_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices yielding a single element
CUTLASS_DEVICE
ElementCompute reduce_indices_(
Params const ¶ms,
ElementCompute *threadblock_workspace,
char const *src_byte_ptr,
int coord_c) {
NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source;
ReductionOp reduction_op(params.reduction_op);
//
// Early exit or initialize to identity element
//
if (!params.inner_count) {
return params.reduction_identity;
}
ComputeFragment accumulator;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(accumulator.size()); ++i) {
accumulator[i] = params.reduction_identity;
}
// Compute the coordinate of the first access
int64_t src_byte_offset = 0;
Coord<kInnerRank> coord;
uint64_t linear_idx = (threadIdx.x + blockDim.x * threadIdx.z + blockDim.x * blockIdx.z * blockDim.z) * kVectorLength;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
// Load the first vector
SourceFragment source_fragment[kBatchSize];
bool not_done = true;
// Iterate over vectors in a linearized reduction index space
while (not_done) {
bool guards[kBatchSize];
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (linear_idx < params.inner_count) {
source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset);
guards[b] = true;
}
else {
guards[b] = false;
not_done = false;
}
linear_idx += (blockDim.z * gridDim.z * blockDim.x) * kVectorLength;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
}
// Perform a batch of reduction operations
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (guards[b]) {
auto cvt = convert_source(source_fragment[b]);
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
cvt);
}
}
    }
//
// Reduction of vectors to scalar
//
ElementCompute reduced_accumulator = accumulator[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kVectorLength; ++i) {
reduced_accumulator = reduction_op(reduced_accumulator, accumulator[i]);
}
//
// Reduction within CTA across threadIdx.xz => threadIdx{.x = 0, .z = 0}
//
// This re-arranges data so threadIdx.y is effectively a row index and threadIdx.xz is a column
//
int thread_count = blockDim.x * blockDim.z;
int thread_j = threadIdx.x + blockDim.x * threadIdx.z;
int thread_i = threadIdx.y;
ElementCompute *frag_ptr = reinterpret_cast<ElementCompute *>(threadblock_workspace) + thread_i * thread_count;
frag_ptr[thread_j] = reduced_accumulator;
//
// Reduce
//
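    // Classic halving tree reduction: e.g. with blockDim.x * blockDim.z == 8 threads per row,
    // thread_count steps through 4, 2, 1, halving the number of active columns each iteration.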
CUTLASS_PRAGMA_NO_UNROLL
while (thread_count > 1) {
thread_count /= 2;
__syncthreads();
if (thread_j < thread_count) {
ElementCompute other = frag_ptr[thread_j + thread_count];
reduced_accumulator = reduction_op(reduced_accumulator, other);
frag_ptr[thread_j] = reduced_accumulator;
}
__syncthreads();
}
return reduced_accumulator;
}
public:
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char const * src_byte_ptr = reinterpret_cast<char const *>(params.source);
char * dst_byte_ptr = nullptr;
// If performing a reduction across CTAs, redirect output to device workspace
if (gridDim.z == 1) {
dst_byte_ptr = reinterpret_cast<char *>(params.destination);
}
else {
dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace);
}
uint64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank> outer_coord;
int64_t dst_byte_offset;
int64_t src_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
if (gridDim.z == 1) {
/// Complete the reduction with no workspace
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset,
coord_c);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0 && threadIdx.x == 0) {
// Convert to output type and store
NumericConverter<ElementOutput, ElementCompute> convert_output;
ElementOutput cvt = convert_output(result);
*reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = cvt;
}
__syncthreads();
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
else {
/// Complete the reduction with workspace
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset,
coord_c);
int64_t byte_offset =
blockIdx.z * params.workspace_stride + idx_linear * sizeof_bits<ElementCompute>::value / 8;
// Store the result for final reduction
if (threadIdx.z == 0 && threadIdx.x == 0) {
*reinterpret_cast<ElementCompute *>(dst_byte_ptr + byte_offset) = result;
}
__syncthreads();
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to perform final reduction
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineContiguousFinal {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
/// Shared memory
struct SharedStorage { };
/// Parameters structure
using Params = TensorReductionAffineContiguousParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank> & coord,
int64_t &dst_offset,
uint64_t linear_idx) const {
// Decompose into coordinate of rank <kReducedRank>
coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod);
// Compute offsets using destination and source strides
dst_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ElementCompute reduce_indices_(
Params const ¶ms,
ElementCompute const *device_workspace) {
ReductionOp reduction_op(params.reduction_op);
char const *src_byte_ptr = reinterpret_cast<char const *>(device_workspace);
// Accumulated output
ElementCompute accumulator = params.reduction_identity;
for (int iter = 0; iter < params.workspace_count; ++iter) {
ElementCompute workspace_item = *reinterpret_cast<ElementCompute const *>(src_byte_ptr);
accumulator = reduction_op(accumulator, workspace_item);
src_byte_ptr += params.workspace_stride;
}
return accumulator;
}
public:
//
// Methods
//
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
uint64_t idx_linear = blockIdx.x * blockDim.x + threadIdx.x;
char * dst_byte_ptr = reinterpret_cast<char *>(params.destination);
// Use modulo division to compute location
Coord<kReducedRank> outer_coord;
int64_t dst_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
/// Complete the reduction
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(params, params.device_workspace + idx_linear);
// Convert to output type and store
NumericConverter<ElementOutput, ElementCompute> convert_output;
*reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = convert_output(result);
// Update indices and pointers
idx_linear += gridDim.x * blockDim.x;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h/0 | {
"file_path": "include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h",
"repo_id": "include",
"token_count": 7946
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing how threads are mapped to a given tile.
*/
#pragma once
#include "cute/arch/mma_sm90_gmma.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
using namespace cute;
template <bool Transpose, class SmemLayoutAtom, class ElementType>
constexpr auto
gmma_smem_transpose_or_passthrough() {
if constexpr (Transpose) {
if constexpr (cute::is_same_v<GMMA::Layout_MN_SW128_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_SW128_Atom<ElementType>{};
}
else if constexpr (cute::is_same_v<GMMA::Layout_MN_SW64_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_SW64_Atom<ElementType>{};
}
else if constexpr (cute::is_same_v<GMMA::Layout_MN_SW32_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_SW32_Atom<ElementType>{};
}
else if constexpr (cute::is_same_v<GMMA::Layout_MN_INTER_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_INTER_Atom<ElementType>{};
}
else {
static_assert(cutlass::detail::dependent_false<SmemLayoutAtom>, "Unsupported Layout_SW_Atom for B SMEM transposition");
}
}
else {
return SmemLayoutAtom{};
}
}
template <class SmemCopyAtom, class ElementType>
constexpr auto
use_universal_transposition() {
if constexpr (sizeof(ElementType) == 1) {
return !cute::is_same_v<GMMA::Layout_MN_SW128_Atom<ElementType>, SmemCopyAtom>;
}
else if constexpr (sizeof(ElementType) == 4){
    // Only universal transposition can handle SW64 and non-swizzled SMEM layouts
if constexpr (cute::is_same_v<GMMA::Layout_MN_SW64_Atom<ElementType>, SmemCopyAtom> ||
cute::is_same_v<GMMA::Layout_MN_INTER_Atom<ElementType>, SmemCopyAtom>) {
return true;
}
else {
return false;
}
}
else {
static_assert(cutlass::detail::dependent_false<ElementType>, "Unsupported ElementType for B SMEM transposition");
}
}
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class NoTranspositionOperandB {
public:
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
constexpr CUTLASS_HOST_DEVICE
NoTranspositionOperandB(
int,
int,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const&,
TensorTransposedSmemB const&,
int, int) { }
CUTLASS_DEVICE void synchronize(int) { }
CUTLASS_DEVICE void synchronize() { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const&,
TensorTransposedSmemB const&,
int) { }
};
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class UniversalTranspositionOperandB {
public:
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
constexpr CUTLASS_HOST_DEVICE
UniversalTranspositionOperandB(
int warp_idx_,
int warp_group_thread_idx_,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB)
: warp_idx(warp_idx_)
, warp_group_thread_idx(warp_group_thread_idx_) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage, int current_step) {
if (current_step > 0) {
return;
}
constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static_assert(NumMathWarpGroup == 1 ||
(!detail::use_universal_transposition<SmemLayoutAtomB, ElementB>() && NumMathWarpGroup == 2),
"Wrong math warp group number for TransposeB");
constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K.
constexpr int BytesPerSmemSwizzleUnit = 16;
constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Universal transposition, need warp_group sync between load and store.
/// The number of reg used depends on the input elementB.
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
In one copy step, a warp group would load WarpgroupTileSize * WarpgroupTileSize tile then store to transposed location.
In warp_group_tile, each warp holds Four WarpTileSize x WarpTileSize elements:
K
------------
| W0 W1 W2 W3 ---
| W0 W1 W2 W3 |
| W0 W1 W2 W3 | --> Copy Step 0
| W0 W1 W2 W3 ---
....
| W0 W1 W2 W3 ---
| W0 W1 W2 W3 |
| W0 W1 W2 W3 | --> Copy Step n
| W0 W1 W2 W3 ---
*/
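    // For example, with a 4-byte ElementB, WarpThreadShapeN = 16 / 4 = 4, so WarpgroupThreadLayout
    // below is a (4, 32) arrangement covering all 128 threads of the warp group.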
static_assert((NumThreadsPerWarpGroup % WarpThreadShapeN == 0), "Unsupported warp thread layout.");
constexpr auto WarpgroupThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<NumThreadsPerWarpGroup / WarpThreadShapeN>{}));
// Get copy tile and partition to each thread
auto sB_tiled_copy = make_tiled_copy(
Copy_Atom<DefaultCopy, ElementB>{},
WarpgroupThreadLayout, // thr_layout
Layout<_1>{} // val_layout
);
static_assert(size(sB_tiled_copy) == size(TiledMma{}), "Wrong thread number in TiledCopy.");
auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx);
Tensor tCsB = sB_thr_copy.partition_S( sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K)
Tensor tCsB_transposed = sB_thr_copy.partition_D(gmma_sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K)
// Divide partitioned tile to limit register usage
constexpr int CopySteps = size<0>(SmemLayoutB{}) / WarpgroupTileSize;
constexpr auto CopyTileShape = make_shape(size<0>(tCsB), Int< size<1>(tCsB) / CopySteps >{}, size<2>(tCsB));
static_assert(size<1>(tCsB) % CopySteps == 0, "CopySteps must evenly divide rank 1 size of partitioned SMEM.");
Tensor tCsB_copy_tile = zipped_divide(tCsB, CopyTileShape);
Tensor tCsB_copy_tile_transposed = zipped_divide(tCsB_transposed, CopyTileShape);
auto transpose_fragment = make_fragment_like(tCsB_copy_tile(_,_0{}));
CUTLASS_PRAGMA_NO_UNROLL
for (int step = 0; step < CopySteps; ++step) {
copy(sB_tiled_copy, tCsB_copy_tile(_,step), transpose_fragment);
// Make sure all elements are read before being overwritten
__syncthreads();
copy(sB_tiled_copy, transpose_fragment, tCsB_copy_tile_transposed(_,step));
}
}
CUTLASS_DEVICE void synchronize(int step) {
if (step == 0) {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
}
CUTLASS_DEVICE void synchronize() {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage) {
this->operator()(sB, gmma_sB, read_stage, 0);
synchronize();
}
private:
const int warp_idx;
const int warp_group_thread_idx;
};
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class AsyncTranspositionOperandB {
public:
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
static constexpr int Steps = 2;
static constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup;
static_assert(NumMathWarpGroup <= 2,
"Wrong math warp group number for TransposeB");
static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K.
static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp;
static constexpr int BytesPerSmemSwizzleUnit = 16;
static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB);
static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN;
static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1);
static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile;
static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invalid warp thread shape.");
static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step.
static constexpr int64_t WarpTileNCoordLUT = 06723763275316420;
static constexpr int64_t WarpTileKCoordLUT = 05410541064206420;
static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT.
static constexpr int MaskPerStep = 07; // Each step is encoded into 3 bits.
static constexpr int NumBitsPerStep = 3;
static constexpr int MaskPerWarp = 07777; // Each warp has 4 steps (12 bits).
static constexpr int NumBitsPerWarp = 12;
// Number of warp_group_tiles
static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0,
"Copy size must evenly divide SMEM tile.");
static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize;
static_assert(size<2>(typename TiledMma::AtomShape_MNK{}) <= WarpThreadShapeK,
"Need to be able to transpose first k-block in the first step");
constexpr CUTLASS_HOST_DEVICE
AsyncTranspositionOperandB(
int warp_idx_,
int warp_group_thread_idx_,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB)
: warp_idx(warp_idx_)
, warp_group_thread_idx(warp_group_thread_idx_)
, warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup)
, current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp)
, current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage, int current_step)
{
if (current_step >= StepsPerWarpGroup) {
return;
}
static constexpr auto WarpThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<WarpThreadShapeK>{}));
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
    /// A warp group uses 2 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize tile.
/// In each step, one warp would hold two warp_tiles.
/// Step 0: Step 1:
/// W0 W1 W2 W3 -- -- -- --
/// W1 W0 -- -- -- -- W3 W2
/// W2 -- -- -- -- W3 W0 W1
/// W3 -- -- -- -- W2 W1 W0
///
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Fully static coord LUT to avoid extra register use.
/// [warp_id][step][warp_tile][n / k]
/// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7
/// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0
/// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1
/// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2
/// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3
///
/// Encoding the coord of warp tile0 into two int64_t values.
/// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern.
/// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0.
/// The 2-step transposition and the 8-step transposition share the same encoding.
///
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
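    // For example, warp W1 (warp_idx_in_warp_group == 1) extracts bits [12, 24) of each LUT:
    // octal 7531 from WarpTileNCoordLUT and octal 6420 from WarpTileKCoordLUT, so its warp_tile0
    // coordinates for steps 0..3 decode to {1,0}, {3,2}, {5,4}, {7,6}, matching the table above.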
// Divide entire SMEM to multiple warp_tiles
constexpr auto WarpTileShape = make_shape(Int<WarpTileSize>(), Int<WarpTileSize>());
Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape);
Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape);
// Get copy tile
auto sB_tiled_copy = make_tiled_copy(
Copy_Atom<DefaultCopy, ElementB>{},
WarpThreadLayout, // thr_layout
Layout<_1>{} // val_layout
);
static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy.");
auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx
// Construct fragments for transposition
Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{}))));
decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = {
make_fragment_like(tmp_tCsB),
make_fragment_like(tmp_tCsB)
};
[[maybe_unused]] int step = current_step * NumMathWarpGroup;
if constexpr (NumMathWarpGroup == 2) {
      // With 2 math warp groups, warps 4~7 form the 1st group and warps 8~11 the 2nd;
      // dividing warp_idx by 8 offsets the step for the 2nd group.
step += warp_idx / (NumWarpsPerWarpGroup * 2);
}
int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT >> (NumBitsPerStep * current_step);
int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT >> (NumBitsPerStep * current_step);
if constexpr (NumMathWarpGroup == 2) {
tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
}
// decoding the warp tile coord.
int warp_tile0_n, warp_tile0_k;
if constexpr (StepsPerWarpGroup <= NumStepsEncoded) {
warp_tile0_n = tmp_warp_tile_n_coord_LUT & MaskPerStep;
warp_tile0_k = tmp_warp_tile_k_coord_LUT & MaskPerStep;
} else {
warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group;
warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4;
}
int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k;
int warp_tile1_k = warp_tile0_n == warp_tile0_k ? warp_tile0_k + 1 : warp_tile0_n;
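    // E.g. W0 step 0 has warp_tile0 = {0,0} on the diagonal, so warp_tile1 = {1,1};
    // W1 step 0 has warp_tile0 = {1,0} off the diagonal, so warp_tile1 is the mirrored {0,1}.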
CUTLASS_PRAGMA_UNROLL
for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) {
static_assert(TilesPerWarp == 2);
// [warp_tile][n/k]
const int warp_tile_coord[TilesPerWarp][2] = {
// n k
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1
};
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB = sB_thr_copy.partition_S(
flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]);
}
// Make sure elements in two 8x8 warp tiles are all consumed
__syncwarp();
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB_transposed = sB_thr_copy.partition_D(
flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed);
}
} // loop warp_group_tile
}
CUTLASS_DEVICE void synchronize(int step) {
if (step < StepsPerWarpGroup) {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
}
CUTLASS_DEVICE void synchronize() {
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < StepsPerWarpGroup; ++i) {
this->operator()(sB, gmma_sB, read_stage, i);
}
synchronize();
}
private:
const int warp_idx;
const int warp_group_thread_idx;
const int warp_idx_in_warp_group;
const int current_warp_tile_n_coord_LUT;
const int current_warp_tile_k_coord_LUT;
};
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class AsyncTranspositionOperandB_1BElementB {
public:
static_assert(sizeof(ElementB_) == 1);
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
static constexpr int Steps = 8;
static constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup;
static_assert(NumMathWarpGroup <= 2,
"Wrong math warp group number for TransposeB");
static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K.
static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp;
static constexpr int BytesPerSmemSwizzleUnit = 16;
static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB);
static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN;
static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1);
static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile;
static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invalid warp thread shape.");
static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step.
static constexpr int64_t WarpTileNCoordLUT = 06723763275316420;
static constexpr int64_t WarpTileKCoordLUT = 05410541064206420;
static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT.
static constexpr int MaskPerStep = 07; // Each step is encoded in 3 bits.
static constexpr int NumBitsPerStep = 3;
static constexpr int MaskPerWarp = 07777; // Each warp has 4 steps (12 bits).
static constexpr int NumBitsPerWarp = 12;
// Number of warp_group_tiles
static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0,
"Copy size must evenly divide SMEM tile.");
static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize;
constexpr CUTLASS_HOST_DEVICE
AsyncTranspositionOperandB_1BElementB(
int warp_idx_,
int warp_group_thread_idx_,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB)
: warp_idx(warp_idx_)
, warp_group_thread_idx(warp_group_thread_idx_)
, warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup)
, current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp)
, current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage, int current_step)
{
if (current_step > 0) {
return;
}
constexpr auto WarpThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<WarpThreadShapeK>{}));
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// A warp group uses 8 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize.
/// Divide a warp_group_tile into 8x8 warp_tiles to further reduce the reg usage.
/// Step 0: Step 1: Step 2: Step 3:
/// W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// W1 W0 -- -- -- -- -- -- -- -- W3 W2 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// W2 -- -- -- -- -- -- -- -- W3 W0 W1 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// W3 -- -- -- -- -- -- -- -- W2 W1 W0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W1 W0 -- -- -- -- -- -- -- -- W3 W2
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W3 W0 W1
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W2 W1 W0
///
/// Step 4: Step 5: Step 6: Step 7:
/// -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3
/// W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- --
/// W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- --
/// W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- --
/// W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- --
///
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Fully static coord LUT to avoid extra register use.
/// [warp_id][step][warp_tile][n / k]
/// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7
/// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0
/// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1
/// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2
/// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3
///
/// Encoding the coord of warp tile0 into two int64_t values.
/// Only encoding Step 0 ~ Step 3, since Step 4 ~ Step 7 follow a straightforward pattern.
/// Only encoding warp tile0, since the coords of warp tile1 can easily be deduced from warp tile0.
/// The 2-step transposition and the 8-step transposition share the same encoding.
///
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
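/// Worked decode example (illustrative; assumes a single math warp group):
///   For W1 (warp_idx_in_warp_group == 1), the constructor extracts the per-warp LUT slices
///     current_warp_tile_n_coord_LUT = (WarpTileNCoordLUT >> 12) & 07777 = 07531
///     current_warp_tile_k_coord_LUT = (WarpTileKCoordLUT >> 12) & 07777 = 06420
///   Step 0: warp_tile0 = (07531 & 07, 06420 & 07) = (1, 0); since n != k, warp_tile1 = (0, 1).
///   Step 1 (after one 3-bit shift): warp_tile0 = ((07531 >> 3) & 07, (06420 >> 3) & 07) = (3, 2)
///   and warp_tile1 = (2, 3), matching the W1 row of the table above.
///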
// Divide entire SMEM to multiple warp_tiles
constexpr auto WarpTileShape = make_shape(Int<WarpTileSize>(), Int<WarpTileSize>());
Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape);
Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape);
// Get copy tile
auto sB_tiled_copy = make_tiled_copy(
Copy_Atom<DefaultCopy, ElementB>{},
WarpThreadLayout, // thr_layout
Layout<_1>{} // val_layout
);
static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy.");
auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx
// Construct fragments for transposition
Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{}))));
decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = {
make_fragment_like(tmp_tCsB),
make_fragment_like(tmp_tCsB)
};
CUTLASS_PRAGMA_NO_UNROLL
for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) {
int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT;
int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT;
constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup;
if constexpr (NumMathWarpGroup == 2) {
tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
}
CUTLASS_PRAGMA_NO_UNROLL
for (int step_per_warp_group = 0; step_per_warp_group < StepsPerWarpGroup; ++step_per_warp_group) {
// With 2 math warp groups, warps 4~7 form the 1st math warp group and warps 8~11 the 2nd, so warp_idx / 8 gives this warp group's step offset.
int step = step_per_warp_group * NumMathWarpGroup + warp_idx / (NumWarpsPerWarpGroup * 2);
// decoding the warp tile coord.
int warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group;
int warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4;
int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k;
int warp_tile1_k = warp_tile0_n == warp_tile0_k ? warp_tile0_k + 1 : warp_tile0_n;
tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep;
tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep;
static_assert(TilesPerWarp == 2);
// [warp_tile][n/k]
const int warp_tile_coord[TilesPerWarp][2] = {
// n k
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1
};
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB = sB_thr_copy.partition_S(
flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]);
}
// Make sure elements in two 8x8 warp tiles are all consumed
__syncwarp();
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB_transposed = sB_thr_copy.partition_D(
flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed);
}
} // loop step_per_warp_group
} // loop warp_group_tile
}
CUTLASS_DEVICE void synchronize(int step) {
if (step == 0) {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
}
CUTLASS_DEVICE void synchronize() {
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage) {
this->operator()(sB, gmma_sB, read_stage, 0);
synchronize();
}
private:
const int warp_idx;
const int warp_group_thread_idx;
const int warp_idx_in_warp_group;
const int current_warp_tile_n_coord_LUT;
const int current_warp_tile_k_coord_LUT;
};
template<
class TiledMma,
class SmemLayoutB,
class SmemLayoutAtomB,
class ElementB,
bool TransposeB
>
constexpr CUTLASS_HOST_DEVICE
auto
make_transpose_operand_b(
int warp_idx,
int warp_group_thread_idx,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB,
cute::bool_constant<TransposeB>)
{
if constexpr (!TransposeB) {
return NoTranspositionOperandB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
else if constexpr (use_universal_transposition<SmemLayoutAtomB, ElementB>()) {
return UniversalTranspositionOperandB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
else if constexpr (sizeof(ElementB) == 1) {
return AsyncTranspositionOperandB_1BElementB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
else {
return AsyncTranspositionOperandB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
}
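// Illustrative usage (sketch, not part of the original source): inside a warp-specialized
// mainloop, the factory above might be used as
//   auto transpose = detail::make_transpose_operand_b(
//       warp_idx, warp_group_thread_idx, TiledMma{}, SmemLayoutB{},
//       SmemLayoutAtomB{}, ElementB{}, cute::bool_constant<TransposeB>{});
//   transpose.transpose(sB, gmma_sB, read_stage);  // performs all steps, then synchronizes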
}; // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/collective/sm90_wgmma_transpose.hpp/0 | {
"file_path": "include/cutlass/transform/collective/sm90_wgmma_transpose.hpp",
"repo_id": "include",
"token_count": 14474
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Templates for computing the addresses used to load small vectors
  from global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedVectorAccessIterator
///
template <
/// Shape of the vector accessed by the entire threadblock
typename Shape,
/// Shape of the vector accessed by the warp
typename WarpShape,
/// Type of Element
typename Element,
/// Layout of the vector
typename Layout,
/// Number of elements for each access
int ElementsPerAccess,
/// Support residual tile
bool EnableResidualAccess = false
>
class PredicatedVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Vector access iterator specialized for vectors, e.g. scale and bias
/// Thread arrangements are for TensorOps
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator <
Shape_,
WarpShape_,
Element_,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kThreads = 32;
static int const kRowsPerIteration = 8;
static int const kThreadsPerRow = kThreads / kRowsPerIteration;
static int const kThreadsPerRowMask = 0x3;
static int const kIterations = WarpShape::kContiguous / (kThreadsPerRow * kElementsPerAccess);
static int const kWarpCountStrided = Shape::kStrided / WarpShape::kStrided;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
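// Illustrative access pattern (hypothetical parameters, not part of the original source):
// with ElementsPerAccess = 4 and WarpShape::kContiguous = 64, the kThreadsPerRow = 4 threads
// of a row start at contiguous element (thread_id & kThreadsPerRowMask) * 4, kIterations
// evaluates to 64 / (4 * 4) = 4, and each operator++() advances the access window by
// kThreadsPerRow * kElementsPerAccess = 16 elements along the contiguous dimension.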
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Extent of tensor
TensorCoord extent_;
/// pointer offset of each thread
TensorCoord thread_offset_;
/// iteration index
LongIndex iteration_;
/// residual access
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
public:
/// Constructs a vector access iterator
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to the start of the vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residual_(false) {
int warp_offset = (warp_id / kWarpCountStrided) * WarpShape::kContiguous;
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + TensorCoord(warp_offset, 0) +
TensorCoord((thread_id & kThreadsPerRowMask) * kElementsPerAccess, 0);
set_iteration_index(0);
if(EnableResidualAccess) {
// compute residual offset
typename TensorCoord::Index residual_size = extent_.contiguous() % WarpShape::kContiguous;
if (residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to start of vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
///< ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_ = index;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(WarpShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess)
* sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iteration_;
if(iteration_ >= kIterations)
iteration_ = 0;
return *this;
}
/// Advances to the next tile, consuming the residual tile offset first if residual access is enabled.
CUTLASS_HOST_DEVICE
void advance() {
if(EnableResidualAccess && is_residual_) {
is_residual_ = false;
thread_offset_ += residual_offset_;
}
else
add_tile_offset(TensorCoord(1, 0));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return ((thread_offset_.contiguous() +
iteration_ * kThreadsPerRow * kElementsPerAccess) < extent_.contiguous());
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedVectorAccessIterator for row-major data.
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator<
Shape_,
WarpShape_,
Element_,
layout::RowMajor,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
static int const kRowsPerIteration = UnderlyingIterator::kRowsPerIteration;
static int const kThreads = UnderlyingIterator::kThreads;
static int const kIterations = UnderlyingIterator::kIterations;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
///< Pointer to the start of the vector
ConstPointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< ID of each participating warp
int warp_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(pointer, layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id, warp_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
ConstPointer pointer, ///< Pointer to the start of the vector
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
int warp_id ///< ID of each participating warp
)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
/// Advances the underlying pitch-linear iterator to the next tile.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| include/cutlass/transform/threadblock/predicated_vector_access_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/predicated_vector_access_iterator.h",
"repo_id": "include",
"token_count": 4187
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
and is safe to use in a union.
*/
#pragma once
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wmma array type (WmmaFragmentArray holds elements of type nvcuda::wmma::fragment)
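//
// Illustrative usage (sketch, not part of the original source):
//   using FragC = nvcuda::wmma::fragment<nvcuda::wmma::accumulator, 16, 16, 16, float>;
//   WmmaFragmentArray<FragC, 4> accum;  // an array of four accumulator fragments
//   accum.clear();                      // zero-fills each fragment via nvcuda::wmma::fill_fragment
//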
template <
/// Element type
typename T,
/// Number of elements in the array
int N,
/// Whether the element type of T is half_t or __half
bool IsHalfType = (platform::is_same<typename T::element_type, cutlass::half_t>::value ||
platform::is_same<typename T::element_type, __half>::value)
>
class WmmaFragmentArray: public Array<T, N, true> {
public:
/// Efficient clear method (override Array::clear())
CUTLASS_HOST_DEVICE
void clear()
{
for(int i = 0; i < Array<T, N, true>::kElements; i++)
{
nvcuda::wmma::fill_fragment((*this)[i], (typename T::element_type)0);
}
}
CUTLASS_HOST_DEVICE
WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs)
{
using element_type = typename T::element_type;
plus<T> add;
for (int i = 0; i < Array<T, N, true>::kElements; i++)
{
(*this)[i] = add((*this)[i], rhs[i]);
}
return *this;
}
};
/// Partial specialization for the case in which T::element_type is
/// half_t or __half. This is needed because the cast (typename T::element_type)0
/// in the primary template flags as an error when __CUDA_NO_HALF_CONVERSIONS__
/// is set.
template <
/// Element type
typename T,
/// Number of elements in the array
int N
>
class WmmaFragmentArray<T, N, true>: public Array<T, N, true> {
public:
/// Efficient clear method (override Array::clear())
CUTLASS_HOST_DEVICE
void clear()
{
for(int i = 0; i < Array<T, N, true>::kElements; i++)
{
nvcuda::wmma::fill_fragment((*this)[i], __float2half(0.f));
}
}
CUTLASS_HOST_DEVICE
WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs)
{
using element_type = typename T::element_type;
plus<T> add;
for (int i = 0; i < Array<T, N, true>::kElements; i++)
{
(*this)[i] = add((*this)[i], rhs[i]);
}
return *this;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| include/cutlass/wmma_array.h/0 | {
"file_path": "include/cutlass/wmma_array.h",
"repo_id": "include",
"token_count": 1451
} | 39 |
# CUTLASS 3.0
_CUTLASS 3.0 - January 2023_
CUTLASS is a collection of CUDA C++ template abstractions for implementing
high-performance matrix-multiplication (GEMM) at all levels and scales within CUDA.
It incorporates strategies for hierarchical decomposition and data movement similar
to those used to implement cuBLAS. CUTLASS decomposes these "moving parts" into
reusable, modular software components abstracted by C++ template classes. These
components can be specialized
and tuned via custom tiling sizes, data types, and other algorithmic policies. The
resulting flexibility simplifies their use as building blocks within custom kernels
and applications.
To support a wide variety of applications, CUTLASS provides extensive support for
mixed-precision computations, providing specialized data-movement and
multiply-accumulate abstractions for 8-bit integer, half-precision floating
point (FP16), single-precision floating point (FP32), and double-precision floating
point (FP64) types. Furthermore, CUTLASS exploits the _Tensor Cores_ and asynchronous
memory copy operations of the latest NVIDIA GPU architectures.
# What's New in CUTLASS 3.0
For an overview of CUTLASS 3.0's GEMM interface levels,
please refer to the
[CUTLASS 3.0 GEMM API document](./gemm_api_3x.md).
To learn how to migrate code using CUTLASS 2.x's interface
to CUTLASS 3.0, please refer to the
[backwards compatibility document](./cutlass_3x_backwards_compatibility.md).
# GEMM examples
For a code example showing how to define
a GEMM kernel using CUTLASS, please refer to
[the quickstart guide](./quickstart.md).
The [`examples` directory](../../examples)
has a variety of examples.
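To give a flavor of the device-level interface, the sketch below launches a
single-precision GEMM with column-major operands. It is a minimal, illustrative
example in the spirit of the quickstart guide; the `run_sgemm` wrapper and its
parameter names are illustrative only, and error handling is omitted.
```c++
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"

// Computes D = alpha * A * B + beta * C in single precision, column-major.
// D aliases C here for brevity.
using Gemm = cutlass::gemm::device::Gemm<
    float, cutlass::layout::ColumnMajor,   // ElementA, LayoutA
    float, cutlass::layout::ColumnMajor,   // ElementB, LayoutB
    float, cutlass::layout::ColumnMajor>;  // ElementC/D, LayoutC

cutlass::Status run_sgemm(
    int M, int N, int K,
    float alpha, float const *A, int lda,
    float const *B, int ldb,
    float beta, float *C, int ldc) {

  Gemm gemm_op;
  return gemm_op({{M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta}});
}
```
The CUTLASS 3.x interface described in the
[CUTLASS 3.0 GEMM API document](./gemm_api_3x.md) instead composes kernels from
collective mainloops and epilogues; the examples directory covers both styles.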
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/doxygen_mainpage.md/0 | {
"file_path": "media/docs/doxygen_mainpage.md",
"repo_id": "media",
"token_count": 898
} | 40 |
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "nvidia-cutlass"
version = "3.5.0.0"
description = "CUTLASS"
readme = "README.md"
requires-python = ">=3.8"
license = {text = "BSD-3-Clause"}
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
]
dependencies = [
"cuda-python>=11.8.0",
"networkx",
"numpy",
"pydot",
"scipy",
"treelib"
]
[project.urls]
"Homepage" = "https://github.com/nvidia/cutlass"
"Bug Tracker" = "https://github.com/nvidia/cutlass/issues"
| pyproject.toml/0 | {
"file_path": "pyproject.toml",
"repo_id": "pyproject.toml",
"token_count": 252
} | 41 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from pycute import product
from cutlass_library import DataTypeSize, DataTypeTag
from cutlass.backend.evt.ir import (
# Load Node
AccumulatorImpl,
AuxLoadImpl,
ColumnBroadcastImpl,
LoadNode,
LoadSrcImpl,
RowBroadcastImpl,
ScalarBroadcastImpl,
# Compute Node
ComputeImpl,
ComputeNode,
# Store Node
AuxStoreImpl,
ColumnReductionImpl,
RowReductionImpl,
ScalarReductionImpl,
StoreNode,
StoreDImpl,
)
from cutlass.backend.library import (
FloatRoundStyleTag,
FunctionalOp,
op_tag,
)
class Sm90AccumulatorImpl(AccumulatorImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""\nusing {self.name_camel} = cutlass::epilogue::fusion::Sm90AccFetch;\n"""
return self._type_decl
class Sm90LoadSrcImpl(LoadSrcImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using ElementC = {DataTypeTag[self.element]};
using StrideC = {self.stride_mnl};
using {self.name_camel} = cutlass::epilogue::fusion::Sm90SrcFetch<{DataTypeTag[self.element]}>;
"""
return self._type_decl
class Sm90AuxLoadImpl(AuxLoadImpl):
@property
def descriptor(self) -> str:
"""
Descriptor for Aux Load
"""
return f"{self.name_camel}Descriptor"
def decl_descriptor(self) -> str:
"""
Declare the descriptor type
"""
return f"\nusing {self.descriptor} = cutlass::epilogue::collective::detail::AuxLoadDescriptor<EpilogueDescriptor, {self.stride_mnl}, {DataTypeTag[self.element]}>;\n"
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = self.decl_descriptor()
self._type_decl += f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90AuxLoad<
{self.descriptor}::Stages, typename {self.descriptor}::EpilogueTile, {DataTypeTag[self.element]},
{self.stride_mnl}, typename {self.descriptor}::SmemLayoutAtom, typename {self.descriptor}::CopyOpS2R
>;
"""
return self._type_decl
def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles):
"""
Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d
"""
return (DataTypeSize[self.element] * stages_c * product(epilogue_tile_mn) // 8, 128)
class Sm90ScalarBroadcastImpl(ScalarBroadcastImpl):
def __init__(self, node: LoadNode) -> None:
super().__init__(node)
self.broadcast_count = 1
self.reduction_fn = FunctionalOp.Multiplies
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90ScalarBroadcast<
{DataTypeTag[self.element]}, {self.stride_mnl}, {self.broadcast_count}, {op_tag(self.reduction_fn)}
>;
"""
return self._type_decl
class Sm90RowBroadcastImpl(RowBroadcastImpl):
@property
def descriptor(self) -> str:
"""
Descriptor for Aux Load
"""
return f"{self.name_camel}Descriptor"
def decl_descriptor(self) -> str:
"""
Declare the descriptor type
"""
return f"\nusing {self.descriptor} = cutlass::epilogue::collective::detail::RowBroadcastDescriptor<EpilogueDescriptor, {DataTypeTag[self.element]}>;\n"
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = self.decl_descriptor()
self._type_decl += f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90RowBroadcast<
{self.descriptor}::Stages, typename EpilogueDescriptor::TileShape,
typename {self.descriptor}::Element, {self.stride_mnl}
>;
"""
return self._type_decl
def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles):
"""
Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d
"""
stages = (stages_c + epi_tiles - 1) // epi_tiles + 1
return (DataTypeSize[self.element] * cta_tile_mnk[1] * stages // 8, 16)
class Sm90ColumnBroadcastImpl(ColumnBroadcastImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90ColBroadcast<
0 /*Stages*/, typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]},
{self.stride_mnl}
>;
"""
return self._type_decl
class Sm90ComputeImpl(ComputeImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90Compute<
{op_tag(self.fn)}, {DataTypeTag[self.element_output]}, {DataTypeTag[self.element_compute]},
{FloatRoundStyleTag[self.round_style]}
>;
"""
return self._type_decl
class Sm90AuxStoreImpl(AuxStoreImpl):
@property
def descriptor(self) -> str:
"""
Descriptor for Aux Load
"""
return f"{self.name_camel}Descriptor"
def decl_descriptor(self) -> str:
"""
Declare the descriptor type
"""
return f"""
using {self.descriptor} = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, {self.stride_mnl}, {DataTypeTag[self.element]}
>;
"""
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = self.decl_descriptor()
self._type_decl += f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90AuxStore<
{self.descriptor}::Stages, typename {self.descriptor}::EpilogueTile, {DataTypeTag[self.element]},
{FloatRoundStyleTag[self.round_style]}, {self.stride_mnl}, typename {self.descriptor}::SmemLayoutAtom,
typename {self.descriptor}::CopyOpR2S
>;
"""
return self._type_decl
def get_smem_size(self, cta_tile_mnk, epilogue_tile_mn, stages_c, stages_d, epi_tiles):
"""
Get the shared memory size based on epilogue_tile_mn, stages_c, and stages_d
"""
return (DataTypeSize[self.element] * stages_d * product(epilogue_tile_mn) // 8, 128)
class Sm90StoreDImpl(StoreDImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
return f"""
using ElementD = {DataTypeTag[self.element]};
using StrideD = {self.stride_mnl};
"""
class Sm90ColumnReductionImpl(ColumnReductionImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90ColReduction<
{op_tag(self.reg_reduce_fn)}, {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, 0,
typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]},
{DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]},
{self.stride_mnl}
>;
"""
return self._type_decl
class Sm90RowReductionImpl(RowReductionImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90RowReduction<
{op_tag(self.reg_reduce_fn)}, {op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)}, 0 /* Stages */,
typename EpilogueDescriptor::TileShape, {DataTypeTag[self.element]},
{DataTypeTag[self.element_compute]}, {FloatRoundStyleTag[self.round_style]},
{self.stride_mnl}
>;
"""
return self._type_decl
class Sm90ScalarReductionImpl(ScalarReductionImpl):
@property
def type_decl(self):
"""
Return the string defining the type
"""
if self._type_decl is not None:
return self._type_decl
self._type_decl = f"""
using {self.name_camel} = cutlass::epilogue::fusion::Sm90ScalarReduction<
{op_tag(self.reg_reduce_fn)}, {op_tag(self.gmem_reduce_fn)},
{DataTypeTag[self.element]}, {DataTypeTag[self.element_compute]},
{FloatRoundStyleTag[self.round_style]}, {self.stride_mnl}
>;
"""
return self._type_decl
| python/cutlass/backend/evt/backend/sm90_nodes.py/0 | {
"file_path": "python/cutlass/backend/evt/backend/sm90_nodes.py",
"repo_id": "python",
"token_count": 4460
} | 42 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Construct the epilogue visitor argument type
"""
from cutlass.backend.c_types import visitor_factory
from cutlass.backend.evt.ir import TopoVisitorNode
from cutlass.backend.evt.passes.pass_dag_2_tree import PassDAG2Tree
from cutlass.backend.evt.passes.pass_get_impl import PassGetImpl
from cutlass.backend.evt.passes.pass_manager import EVTPassBase
from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation
class PassGetArgumentType(EVTPassBase):
"""
Construct the epilogue visitor argument type
"""
dependencies = [
PassShapeTypePropagation, # The Layout of all nodes must be set
PassDAG2Tree, # The type of each node must be set
PassGetImpl # The DAG subgraphs must be set
]
def requires(self) -> None:
# Check "D" is in the node list
if self.cc == 90 and (not self.dag_ir.has_node("D")):
raise SyntaxError(
"Sm90 EVT requires the epilogue to have a returned tensor D, "
"but the variable 'D' is not found in the return values.")
def call(self):
nodes = self.dag_ir.nodes_topological_order()
self.argument_types = {}
for node in nodes:
meta = self.dag_ir.get_node_meta(node)
if not meta.disabled:
self.argument_types[node] = meta.underlying_impl.argument_type
if node == "D" and self.cc == 90:
continue
if isinstance(meta, TopoVisitorNode):
self.get_dag_argument_type(node)
else:
self.get_evt_argument_type(node)
self.cc_specific_method(self.set_argument_type)()
def get_evt_argument_type(self, node):
# Sort the input nodes by edge weight
input_types = [self.argument_types[child] for child in self.dag_ir.get_all_inputs(node)]
if len(input_types) > 0:
self.argument_types[node] = visitor_factory(
input_types + [self.argument_types[node],], self.dag_ir.get_all_inputs(node) + [node,])
def get_dag_argument_type(self, node):
meta = self.dag_ir.get_node_meta(node)
subgraph = meta.subgraph
subgraph_nodes = subgraph.nodes_topological_order()
# Visit the unvisited nodes in subgraph
for n in subgraph_nodes:
m = subgraph.get_node_meta(n)
if m.disabled:
continue
else:
self.argument_types[n] = m.underlying_impl.argument_type
input_types = [self.argument_types[child] for child in subgraph_nodes[:-1]]
if len(input_types) > 0:
self.argument_types[node] = visitor_factory(input_types, subgraph_nodes[:-1])
def set_argument_type(self):
pass
def sm90_set_argument_type(self):
self.dag_ir.epilogue_thread_type = self.argument_types[self.dag_ir.get_all_inputs("D")[0]]
# Get the tensorD argument type
self.dag_ir.arg_d_type = self.dag_ir.get_node_meta("D").underlying_impl.argument_type_d
# Get the tensorC argument type
if self.dag_ir.has_node("C"):
self.dag_ir.arg_c_type = self.dag_ir.get_node_meta("C").underlying_impl.argument_type_c
else:
self.dag_ir.arg_c_type = self.dag_ir.arg_d_type
def sm80_set_argument_type(self):
nodes = self.dag_ir.nodes_topological_order()
self.dag_ir.epilogue_thread_type = self.argument_types[nodes[-1]]
| python/cutlass/backend/evt/passes/pass_argument_type.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/pass_argument_type.py",
"repo_id": "python",
"token_count": 2058
} | 43 |
################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import ctypes
from typing import Union
from cuda import cuda, cudart
import numpy as np
from cutlass_library import (
DataTypeNames,
DataTypeSize,
DataTypeTag,
LayoutType,
SubstituteTemplate
)
import cutlass
from cutlass.backend.c_types import MatrixCoord_, TensorRef2D_, get_reduction_params
from cutlass.backend.frontend import NumpyFrontend, TorchFrontend
from cutlass.backend.library import TensorDescription
from cutlass.backend.memory_manager import DevicePtrWrapper
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.shape import MatrixCoord
from cutlass.utils.datatypes import is_numpy_tensor, is_torch_tensor
class ReductionOperation:
pass
class ReductionArguments:
"""
Arguments of reduction
"""
def __init__(
self,
operation: ReductionOperation,
problem_size: "list[int]",
partitions: int,
workspace: cuda.CUdeviceptr,
destination: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
source: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
**kwargs,
) -> None:
# tensor_C can be interpreted as the bias with bias=True in keyword args
if "bias" in kwargs.keys():
self.bias = kwargs["bias"]
else:
# by default, tensor_C is not bias
self.bias = False
if "stream" in kwargs.keys():
self.stream = kwargs["stream"]
else:
self.stream = cuda.CUstream(0)
self.operation = operation
self.ptr_workspace = workspace
# number of split-k partitions
self.partitions = partitions
if is_numpy_tensor(destination):
self.host_D = destination
self.destination_buffer = NumpyFrontend.argument(destination, True)
self.source_buffer = NumpyFrontend.argument(source, False)
self.ptr_destination = cuda.CUdeviceptr(self.destination_buffer.ptr)
self.ptr_source = cuda.CUdeviceptr(self.source_buffer.ptr)
elif is_torch_tensor(destination):
self.ptr_destination = TorchFrontend.argument(destination)
self.ptr_source = TorchFrontend.argument(source)
elif isinstance(destination, cuda.CUdeviceptr):
self.ptr_destination = destination
self.ptr_source = source
else:
raise TypeError("unknown Type")
self.problem_size = MatrixCoord_(problem_size[0], problem_size[1])
self.partition_stride = (
problem_size[0] * problem_size[1] * DataTypeSize[operation.C.element] // 8
)
if "output_op" in kwargs.keys():
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
self.get_arguments()
@staticmethod
def get_tensor_ref(
extent: "tuple[int]",
device_ptr: cuda.CUdeviceptr,
layout: LayoutType,
):
if layout == LayoutType.RowMajor:
return TensorRef2D_(int(device_ptr), extent[1])
else:
raise ValueError(f"Unknown layout type {layout}")
def get_arguments(self):
ref_workspace = ReductionArguments.get_tensor_ref(
extent=[
self.problem_size.row,
self.problem_size.column,
],
device_ptr=self.ptr_workspace,
layout=LayoutType.RowMajor,
)
if self.bias:
ref_source = ReductionArguments.get_tensor_ref(
extent=[0, 0],
device_ptr=self.ptr_source,
layout=LayoutType.RowMajor,
)
else:
ref_source = ReductionArguments.get_tensor_ref(
extent=[
self.problem_size.row,
self.problem_size.column,
],
device_ptr=self.ptr_source,
layout=LayoutType.RowMajor,
)
ref_destination = ReductionArguments.get_tensor_ref(
extent=[
self.problem_size.row,
self.problem_size.column,
],
device_ptr=self.ptr_destination,
layout=LayoutType.RowMajor,
)
self.c_arguments = self.operation.argument_type(
self.problem_size,
self.partitions,
self.partition_stride,
ref_workspace,
ref_destination,
ref_source,
self.output_op,
)
params_ = self.operation.rt_module.get_args(ctypes.byref(self.c_arguments))
self.host_workspace = bytearray(params_.contents)
def sync(self):
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
if hasattr(self, "host_D"):
(err,) = cuda.cuMemcpyDtoH(
self.host_D,
self.ptr_destination,
self.host_D.size * self.host_D.itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
self.free()
def free(self):
"""
Frees allocated device-side memory
"""
# Free any device memory allocated manually
if not cutlass.use_rmm:
for attr in ["destination_buffer", "source_buffer"]:
if hasattr(self, attr):
buf = getattr(self, attr)
if isinstance(buf, DevicePtrWrapper):
err, = cudart.cudaFree(buf.ptr)
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(f"cudaFree failed with error {err}")
del buf
class ReductionRT(ExecutableOperation):
"""
ReductionRT manages the CUTLASS runtime components for reduction
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Params* params){
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++)
output[i] = bytes[i];
return output;
}
}
"""
def __init__(self, operation: ReductionOperation):
super().__init__(operation)
self.operation: ReductionOperation = operation
self.emitter = EmitReductionInstance("_type")
self.elements_per_access = self.operation.count
(
self.argument_type,
self.epilogue_type,
) = get_reduction_params(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type)]
def emit(self):
return self.emitter.emit(self.operation)
def plan(self, arguments: ReductionArguments):
block_shape = [
self.operation.shape.column // self.elements_per_access,
self.operation.shape.row,
1,
]
grid_shape = [
(arguments.problem_size.row + self.operation.shape.row - 1)
// self.operation.shape.row,
(arguments.problem_size.column + self.operation.shape.column - 1)
// self.operation.shape.column,
1,
]
return LaunchConfiguration(
grid_shape,
block_shape,
self.shared_memory_capacity,
)
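    # Illustrative launch-configuration arithmetic for plan() (hypothetical numbers,
    # not taken from the library): with an operation shape of 4x128 (rows x columns),
    # elements_per_access = 4, and a 100x300 problem, the block shape is
    # [128 // 4, 4, 1] = [32, 4, 1] and the grid shape is
    # [ceil(100 / 4), ceil(300 / 128), 1] = [25, 3, 1].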
def initialize(self):
(err,) = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error: {err}")
class ReductionOperation:
"""
CUTLASS reduction Operation
"""
def __init__(
self,
shape: MatrixCoord,
C: TensorDescription,
element_accumulator,
element_workspace=None,
element_compute=None,
epilogue_functor=None,
count: int = 1,
partitions_per_stage: int = 4,
) -> None:
self.shape = shape
self.epilogue_functor = epilogue_functor
self.element_accumulator = element_accumulator
if element_workspace is None:
self.element_workspace = element_accumulator
else:
self.element_workspace = element_workspace
if element_compute is None:
self.element_compute = element_accumulator
else:
self.element_compute = element_compute
self.element_output = C.element
self.C: TensorDescription = C
# Reduce op processing size
self.count: int = count
# Number of partitions to reduce per stage
self.partitions_per_stage: int = partitions_per_stage
self.rt_module: ReductionRT = ReductionRT(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def extended_name(self):
extend_name = "${element_workspace}_${element_accumulator}_${element_compute}_${element_output}"
return SubstituteTemplate(
extend_name,
{
"element_workspace": DataTypeNames[self.element_workspace],
"element_accumulator": DataTypeNames[self.element_accumulator],
"element_compute": DataTypeNames[self.element_compute],
"element_output": DataTypeNames[self.element_output],
},
)
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size"""
configuration_name = "cutlass_reduce_split_k_${extended_name}_${threadblock}"
threadblock = "%dx%d" % (
self.shape.row,
self.shape.column,
)
return SubstituteTemplate(
configuration_name,
{
"extended_name": self.extended_name(),
"threadblock": threadblock,
},
)
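    # For example (illustrative values, not from the library): with f32 workspace,
    # accumulator, and compute types, an f16 output, and a 4x128 shape,
    # configuration_name() yields a string of the form
    # "cutlass_reduce_split_k_f32_f32_f32_f16_4x128", where the element tags come
    # from DataTypeNames.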
def procedural_name(self):
"""The full procedural name indicates architeture, extended name, tile size"""
return self.configuration_name()
def run(self, arguments: ReductionArguments) -> cuda.CUresult:
"""
Configure and launch the cuda kernel with input arguments
"""
launch_config = self.rt_module.plan(arguments)
host_workspace = arguments.host_workspace
device_workspace = None
err = self.rt_module.run(
host_workspace,
device_workspace,
launch_config,
arguments.stream
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
return err
class EmitReductionInstance:
def __init__(self, operation_suffix="") -> None:
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
"cutlass/reduction/kernel/reduce_split_k.h",
"cutlass/reduction/thread/reduction_operators.h",
]
self.template = """
// Reduction kernel instance
using ${operation_name}_base =
typename cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<${shape_row}, ${shape_column}>,
${epilogue_functor},
cutlass::reduction::thread::ReduceAdd<
${element_accumulator},
${element_output},
${count}>,
${partition_per_stage}>;
struct ${operation_name}${operation_suffix}:
public ${operation_name}_base { };
"""
def emit(self, operation: ReductionOperation):
vector_length_bits = min(operation.C.alignment * DataTypeSize[operation.C.element], 128)
epilogue_vector_length = vector_length_bits // DataTypeSize[operation.C.element]
values = {
"operation_name": operation.configuration_name(),
"operation_suffix": self.operation_suffix,
"shape_row": str(operation.shape.row),
"shape_column": str(operation.shape.column),
"epilogue_functor": operation.epilogue_functor.emit(),
"element_output": DataTypeTag[operation.element_output],
"epilogue_vector_length": str(epilogue_vector_length),
"element_accumulator": DataTypeTag[operation.element_accumulator],
"element_compute": DataTypeTag[operation.element_compute],
"element_workspace": DataTypeTag[operation.element_workspace],
"count": str(operation.count),
"partition_per_stage": str(operation.partitions_per_stage),
}
return SubstituteTemplate(self.template, values)
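# Sketch of how the pieces above fit together (assumes a fully constructed
# ReductionOperation `op`; not an executable snippet on its own):
#   emitter = EmitReductionInstance()
#   source = emitter.emit(op)   # C++ instantiation of ReduceSplitK for `op`
#   op.rt_module.emit()         # similar string, rendered with the "_type" operation suffix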
| python/cutlass/backend/reduction_operation.py/0 | {
"file_path": "python/cutlass/backend/reduction_operation.py",
"repo_id": "python",
"token_count": 6773
} | 44 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for expressing shapes
"""
from cutlass_library import (
ConvMode,
ConvKind,
LayoutType
)
from cutlass.backend.c_types import (
Conv2DProblemSize_,
GemmCoord_,
GemmCoordBatched_
)
class MatrixCoord:
def __init__(self, row, col):
self._row = row
self._col = col
@property
def row(self):
return self._row
@property
def column(self):
return self._col
def leading_dimension(self, layout: LayoutType) -> int:
"""
Returns the leading dimension for a matrix with layout ``layout`` and shape provided by the MatrixCoord.
:param layout: layout of matrix
:type layout: cutlass_library.LayoutType
:returns: leading dimension
:rtype: int
"""
if layout == LayoutType.RowMajor:
return self._col
elif layout == LayoutType.ColumnMajor:
return self._row
else:
raise Exception(f'Unsupported layout for leading dimension calculation: {layout}')
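# Illustrative example (values chosen here, not from the library): for a 128x64
# matrix, MatrixCoord(128, 64).leading_dimension(LayoutType.RowMajor) returns 64
# (the column count), while the ColumnMajor case returns 128 (the row count).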
class GemmCoord:
def __init__(self, m: int, n: int, k: int):
self._m = m
self._n = n
self._k = k
@property
def m(self) -> int:
return self._m
@property
def n(self) -> int:
return self._n
@property
def k(self) -> int:
return self._k
@property
def mk(self) -> MatrixCoord:
return MatrixCoord(self._m, self._k)
@property
def mn(self) -> MatrixCoord:
return MatrixCoord(self._m, self._n)
@property
def kn(self) -> MatrixCoord:
return MatrixCoord(self._k, self._n)
@property
def ctype(self) -> GemmCoord_:
return GemmCoord_(self._m, self._n, self._k)
def batched_ctype(self, batch_count: int) -> GemmCoordBatched_:
return GemmCoordBatched_(self._m, self._n, self._k, batch_count)
class Conv2DProblemSize:
def __init__(
self, n: int, h: int, w: int, c: int,
k: int, r: int, s: int, c_: int,
pad_h: int, pad_w: int, stride_h: int, stride_w: int,
dilation_h: int, dilation_w: int, mode: ConvMode=ConvMode.CrossCorrelation,
split_k_slices: int=1, groups: int=1):
self.N = n
self.H = h
self.W = w
self.C = c
self.K = k
self.R = r
self.S = s
self.pad_h = pad_h
self.pad_w = pad_w
self.stride_h = stride_h
self.stride_w = stride_w
self.dilation_h = dilation_h
self.dilation_w = dilation_w
self.mode = int(mode)
self.split_k_slices = split_k_slices
self.groups = groups
self.P = ((h + pad_h * 2 - r * dilation_h) // stride_h) + 1
self.Q = ((w + pad_w * 2 - s * dilation_w) // stride_w) + 1
@property
def ctype(self) -> Conv2DProblemSize_:
return Conv2DProblemSize_(self)
def implicit_gemm_size(self, kind: ConvKind):
if kind == ConvKind.Fprop:
return GemmCoord(
self.N * self.P * self.Q,
self.K,
self.R * self.S * self.C // self.groups
)
elif kind == ConvKind.Dgrad:
return GemmCoord(
self.N * self.H * self.W,
self.C,
self.R * self.S * self.K
)
elif kind == ConvKind.Wgrad:
return GemmCoord(
self.K,
self.R * self.S * self.C,
self.N * self.P * self.Q
)
@staticmethod
def from_sizes(input_size, weight_size):
K, R, S, _ = weight_size
pad_h = R // 2
pad_w = S // 2
stride_h = 1
stride_w = 1
dilation_h = 1
dilation_w = 1
return Conv2DProblemSize(
*input_size,
*weight_size,
pad_h, pad_w,
stride_h, stride_w,
dilation_h, dilation_w
)
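# Minimal illustrative usage (sketch appended for exposition; the sizes below are
# arbitrary and not part of the original module):
if __name__ == "__main__":
    # 8x8 input with 16 channels, 32 3x3 filters, unit stride/dilation, padding 1
    problem = Conv2DProblemSize(
        1, 8, 8, 16,
        32, 3, 3, 16,
        1, 1, 1, 1,
        1, 1,
    )
    # With these settings the output extent is P = Q = 8, so the Fprop
    # implicit GEMM is (M, N, K) = (1*8*8, 32, 3*3*16) = (64, 32, 144).
    gemm = problem.implicit_gemm_size(ConvKind.Fprop)
    print(problem.P, problem.Q, gemm.m, gemm.n, gemm.k)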
| python/cutlass/shape.py/0 | {
"file_path": "python/cutlass/shape.py",
"repo_id": "python",
"token_count": 2440
} | 45 |
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
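# Example (typical Sphinx usage): "make html" is routed through the catch-all
# rule above, i.e. it runs `sphinx-build -M html "source" "_build"` and leaves
# the rendered pages under _build/html.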
| python/docs_src/Makefile/0 | {
"file_path": "python/docs_src/Makefile",
"repo_id": "python",
"token_count": 252
} | 46 |
.. CUTLASS Python interface documentation master file, created by
sphinx-quickstart on Mon Feb 13 17:57:39 2023.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
.. include:: ../../README.md
:start-line: 1
:parser: markdown
.. toctree::
:hidden:
Home <self>
.. toctree::
:hidden:
:caption: Getting Started:
install.md
Getting Started <externals/00_basic_gemm.nblink>
contribute.md
.. toctree::
:hidden:
:caption: Python Documentation:
modules.rst
.. toctree::
:hidden:
:caption: Examples and Tutorials:
examples.rst
.. toctree::
:hidden:
:caption: Advanced:
.. toctree::
:hidden:
:caption: FAQ:
.. toctree::
:hidden:
:caption: Reference:
Github <https://github.com/NVIDIA/cutlass>
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| python/docs_src/source/index.rst/0 | {
"file_path": "python/docs_src/source/index.rst",
"repo_id": "python",
"token_count": 351
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <cute/container/array_subbyte.hpp>
#include <cute/tensor.hpp>
#include <cute/numeric/numeric_types.hpp>
TEST(CuTe_core, ArraySubbyte)
{
using namespace cute;
{
array_subbyte<int4_t, 10> array0{};
array_subbyte<int4_t, 5> array1{};
fill(array0, int4_t(0));
fill(array1, int4_t(1));
for (size_t i = 0; i < array1.size(); ++i) {
array0[i+5] = array1[i];
}
EXPECT_EQ(int4_t(array0.back()), int4_t(1));
for (size_t i = 0; i < array1.size(); ++i) {
EXPECT_EQ(int4_t(array0[i]), int4_t(int(i) / 5));
}
}
{
array_subbyte<uint8_t, 14> a{};
//std::cout << sizeof_bits<decltype(a)>::value << std::endl;
EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 14*8);
fill(a, uint8_t(13));
for (int i = 0; i < int(a.size()); ++i) {
//std::cout << i << ": " << int(a[i]) << " -> ";
EXPECT_EQ(a[i], uint8_t(13));
a[i] = uint8_t(i);
//std::cout << int(a[i]) << std::endl;
EXPECT_EQ(a[i], uint8_t(i));
}
//std::cout << std::endl;
}
{
array_subbyte<int4_t, 14> a{};
//std::cout << sizeof_bits<decltype(a)>::value << std::endl;
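    // 14 four-bit elements pack into 7 bytes of backing storage, hence 14/2*8 = 56 bits.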
EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 14/2*8);
fill(a, int4_t(-5));
for (int i = 0; i < int(a.size()); ++i) {
//std::cout << i << ": " << int4_t(a[i]) << " -> ";
EXPECT_EQ(int4_t(a[i]), int4_t(-5));
a[i] = int4_t(i);
//std::cout << int4_t(a[i]) << std::endl;
EXPECT_EQ(int4_t(a[i]), int4_t(i));
}
//std::cout << std::endl;
}
{
array_subbyte<uint2_t, 14> a{};
//std::cout << sizeof_bits<decltype(a)>::value << std::endl;
EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 4*8);
fill(a, uint2_t(-5));
for (int i = 0; i < int(a.size()); ++i) {
//std::cout << i << ": " << uint2_t(a[i]) << " -> ";
EXPECT_EQ(uint2_t(a[i]), uint2_t(-5));
a[i] = uint2_t(i);
//std::cout << uint2_t(a[i]) << std::endl;
EXPECT_EQ(uint2_t(a[i]), uint2_t(i));
}
//std::cout << std::endl;
}
{
array_subbyte<bool, 14> a{};
//std::cout << sizeof_bits<decltype(a)>::value << std::endl;
EXPECT_EQ(cute::sizeof_bits_v<decltype(a)>, 2*8);
fill(a, bool(1));
for (int i = 0; i < int(a.size()); ++i) {
//std::cout << i << ": " << bool(a[i]) << " -> ";
EXPECT_EQ(a[i], bool(1));
a[i] = bool(i % 2);
//std::cout << bool(a[i]) << std::endl;
EXPECT_EQ(a[i], bool(i % 2));
}
//std::cout << std::endl;
}
}
TEST(CuTe_core, Subbyte_iterator)
{
using namespace cute;
{
array_subbyte<uint8_t, 15> a{};
auto tensor = make_tensor(subbyte_iterator<uint8_t>(a.raw_data()), make_shape(15));
fill(a, uint8_t(13));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(uint8_t(tensor(i)), 13);
tensor(i) = uint8_t(i);
EXPECT_EQ(a[i], uint8_t(tensor(i)));
}
}
{
array_subbyte<int4_t, 15> a{};
auto tensor = make_tensor(subbyte_iterator<int4_t>(a.raw_data()), make_shape(15));
fill(a, int4_t(-5));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(int4_t(tensor(i)), int4_t(-5));
tensor(i) = int4_t(i);
EXPECT_EQ(int4_t(a[i]), int4_t(tensor(i)));
}
}
{
array_subbyte<uint2_t, 15> a{};
auto tensor = make_tensor(subbyte_iterator<uint2_t>(a.raw_data()), make_shape(15));
fill(a, uint2_t(-5));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(uint2_t(tensor(i)), uint2_t(-5));
tensor(i) = uint2_t(i);
EXPECT_EQ(uint2_t(a[i]), uint2_t(tensor(i)));
}
}
{
array_subbyte<bool, 15> a{};
auto tensor = make_tensor(subbyte_iterator<bool>(a.raw_data()), make_shape(15));
fill(a, bool(1));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(bool(tensor(i)), bool(1));
tensor(i) = bool(i % 2);
EXPECT_EQ(a[i], bool(tensor(i)));
}
}
}
TEST(CuTe_core, Const_subbyte_iterator)
{
using namespace cute;
{
array_subbyte<uint8_t, 15> a{};
auto tensor = make_tensor(subbyte_iterator<uint8_t const>(a.raw_data()), make_shape(15));
fill(a, uint8_t(13));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(uint8_t(tensor(i)), 13);
a[i] = uint8_t(i);
EXPECT_EQ(a[i], uint8_t(tensor(i)));
}
}
{
array_subbyte<int4_t, 15> a{};
auto tensor = make_tensor(subbyte_iterator<int4_t const>(a.raw_data()), make_shape(15));
fill(a, int4_t(-5));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(int4_t(tensor(i)), int4_t(-5));
a[i] = int4_t(i);
EXPECT_EQ(int4_t(a[i]), int4_t(tensor(i)));
}
}
{
array_subbyte<uint2_t, 15> a{};
auto tensor = make_tensor(subbyte_iterator<uint2_t const>(a.raw_data()), make_shape(15));
fill(a, uint2_t(-5));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(uint2_t(tensor(i)), uint2_t(-5));
a[i] = uint2_t(i);
EXPECT_EQ(uint2_t(a[i]), uint2_t(tensor(i)));
}
}
{
array_subbyte<bool, 15> a{};
auto tensor = make_tensor(subbyte_iterator<bool const>(a.raw_data()), make_shape(15));
fill(a, bool(1));
for (int i = 0; i < int(a.size()); ++i) {
EXPECT_EQ(bool(tensor(i)), bool(1));
a[i] = bool(i % 2);
EXPECT_EQ(a[i], bool(tensor(i)));
}
}
}
| test/unit/cute/core/array_subbyte.cpp/0 | {
"file_path": "test/unit/cute/core/array_subbyte.cpp",
"repo_id": "test",
"token_count": 3076
} | 48 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for generic CuTe layouts
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/layout.h"
#include "cutlass/matrix_coord.h"
// Cute includes
#include <cute/layout.hpp>
#include <cute/int_tuple.hpp>
using namespace cutlass;
using namespace cute;
namespace test {
namespace layout {
template <typename GenericLayout, typename Layout>
struct Testbed {
Testbed() {}
bool run() {
GenericLayout generic_layout;
Layout layout = Layout::packed({size<0>(generic_layout), size<1>(generic_layout)});
for (int m = 0; m < size<0>(generic_layout); m++) {
for (int n = 0; n < size<1>(generic_layout); n++) {
if (generic_layout(m, n) != layout({m, n})) return false;
}
}
return true;
}
};
}
}
//////////////////////////////////////////////////////////////////////////
// Test Generic CuTe Layouts
//////////////////////////////////////////////////////////////////////////
/// Canonical Layouts
TEST(GenericLayout, ColumnMajor) {
using GenericLayout = cute::Layout<Shape<_8, _4>, Stride<_1, _8>>;
using Layout = cutlass::layout::ColumnMajor;
test::layout::Testbed<GenericLayout, Layout> testbed;
EXPECT_TRUE(testbed.run());
}
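// For the 8x4 column-major case above, both layouts map (m, n) to offset m + 8 * n:
// the cute layout through its (_1, _8) strides, and
// cutlass::layout::ColumnMajor::packed({8, 4}) through a leading dimension of 8.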
//////////////////////////////////////////////////////////////////////////
TEST(GenericLayout, RowMajor) {
using GenericLayout = cute::Layout<Shape<_8, _4>, Stride<_4, _1>>;
using Layout = cutlass::layout::RowMajor;
test::layout::Testbed<GenericLayout, Layout> testbed;
EXPECT_TRUE(testbed.run());
}
//////////////////////////////////////////////////////////////////////////
/// Swizzle Shared Memory layouts
TEST(GenericLayout, RowMajorTensorOpMultiplicandCrosswise) {
using GenericLayout = decltype(
composition(
Swizzle<3,3,3>{},
Layout<Shape<_128, _64>, Stride<_64, _1>>{})
);
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<cutlass::half_t>::value, 64>;
test::layout::Testbed<GenericLayout, Layout> testbed;
EXPECT_TRUE(testbed.run());
}
//////////////////////////////////////////////////////////////////////////
TEST(GenericLayout, ColumnMajorTensorOpMultiplicandCongruous) {
using GenericLayout = decltype(
composition(
Swizzle<3,3,4>{},
Layout<Shape<_128, _64>>{})
);
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<cutlass::half_t>::value, 64>;
test::layout::Testbed<GenericLayout, Layout> testbed;
EXPECT_TRUE(testbed.run());
}
//////////////////////////////////////////////////////////////////////////
| test/unit/cute/layout/layout_operator.cu/0 | {
"file_path": "test/unit/cute/layout/layout_operator.cu",
"repo_id": "test",
"token_count": 1383
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for threadblock-level epilogues with reduction
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination_drelu.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
#include "cutlass/epilogue/threadblock/epilogue_with_reduction.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "epilogue_with_reduction_testbed.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Disable selected tests on CUDA 11.1
//
//
#define ENABLE_BLOCKED_TESTS (!(__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ == 1))
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if ENABLE_BLOCKED_TESTS
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/epilogue_with_reduction_tensor_op.cu/0 | {
"file_path": "test/unit/epilogue/threadblock/epilogue_with_reduction_tensor_op.cu",
"repo_id": "test",
"token_count": 8687
} | 50 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface with:
    A: row major, of type FE4M3 or FE5M2
    B: column major, of type FE4M3 or FE5M2
    C: row major, of type FE4M3 or FE5M2
Accum: F32
*/
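// Note: FE4M3 (cutlass::float_e4m3_t) and FE5M2 (cutlass::float_e5m2_t) are the two
// 8-bit floating-point formats, with 4 exponent / 3 mantissa bits and
// 5 exponent / 2 mantissa bits respectively.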
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_generic_with_scaling.h"
#include "cutlass/gemm/device/gemm_universal_with_absmax.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed.h"
#include "testbed_with_absmax.h"
#if defined(CUTLASS_ARCH_MMA_SM89_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_fastacc_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
static int const kAlignment = 16;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages,
kAlignment, kAlignment, cutlass::arch::OpMultiplyAddFastAccum
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, relu_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::ReLu,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::ReLu>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe5m2n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e5m2_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe5m2t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e5m2_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe5m2t_fe5m2n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e5m2_t;
using ElementB = cutlass::float_e5m2_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe5m2t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e5m2_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe5m2t_fe5m2n_fe5m2t_tensor_op_f32, identity_diff_aux_output_types_128x256x64_64x64x64) {
using ElementA = cutlass::float_e5m2_t;
using ElementB = cutlass::float_e5m2_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = cutlass::float_e5m2_t;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x128x64_32x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 128, 64>, cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
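// This test exercises the same kernel with device-side scaling disabled: the trailing
// boolean arguments passed to TestAllGemmWithAbsmax below turn off the scale factors for
// A, B, and C in the testbed.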
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_noScale_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(
/* scaleA = */false,
/* scaleB = */false,
/* scaleC = */false
);
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_noAux_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = float;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
#endif // CUTLASS_ARCH_MMA_SM89_SUPPORTED
| test/unit/gemm/device/gemm_f8t_f8n_f8t_tensor_op_f32_sm89.cu/0 | {
"file_path": "test/unit/gemm/device/gemm_f8t_f8n_f8t_tensor_op_f32_sm89.cu",
"repo_id": "test",
"token_count": 6677
} | 51 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface with an elementwise tensor-tensor broadcast epilogue
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp"
#include "cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_tensor_broadcast.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_s8t_s8n_s8n_tensor_op_gmma_s32_tensor_broadcast, 128x128x128_2x2x1_ActReLU_Bin0Mul_Bin1Plus_UnaryHardSwish) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementOutput = int32_t;
using ElementAccumulator = ElementOutput;
using ElementCompute = ElementOutput;
using ElementBias = ElementOutput;
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
int8_t, LayoutA, 16,
int8_t, LayoutB, 16,
int32_t,
Shape<_128,_128,_128>, Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using EpilogueOp = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::EpilogueTensorBroadcast<
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::epilogue::thread::LinearCombinationTensorBroadcast<
ElementOutput, ElementAccumulator, ElementCompute, ElementBias,
cutlass::epilogue::thread::ReLu,
cutlass::multiplies,
cutlass::plus,
cutlass::epilogue::thread::HardSwish
>,
cutlass::gemm::EpilogueDefault>>;
EXPECT_TRUE(EpilogueOp::IsBinaryOp0Enabled);
EXPECT_TRUE(EpilogueOp::IsBinaryOp1Enabled);
EXPECT_TRUE(EpilogueOp::IsUnaryOpEnabled);
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveOp,
EpilogueOp
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAllTensorBroadcast<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32_tensor_broadcast.cu",
"repo_id": "test",
"token_count": 1527
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide Rank 2k update interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/rank_k_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename RankK>
struct TestbedRank2KUniversal {
using ElementA = typename RankK::ElementA;
using ElementC = typename RankK::ElementC;
using ElementAccumulator = typename RankK::ElementAccumulator;
using ElementCompute = typename RankK::RankKkernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename RankK::ElementA, typename RankK::LayoutA> tensor_A;
cutlass::HostTensor<typename RankK::ElementC, typename RankK::LayoutC> tensor_C;
cutlass::HostTensor<typename RankK::ElementC, typename RankK::LayoutC> tensor_D;
cutlass::HostTensor<typename RankK::ElementC, typename RankK::LayoutC> reference_D;
//
// Methods
//
TestbedRank2KUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
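    // The fill range below is chosen from the operand/output bit widths: 1-bit inputs use
    // [0, 2], other 8-bit-or-smaller inputs use [-2, 2], 16-bit outputs use [-5, 5], and
    // everything else uses [-8, 8], keeping randomly generated values small for
    // low-precision arithmetic.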
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename RankK::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Input distribution not implemented";
return false;
}
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_symmetric_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename RankK::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillSymmetricRandomUniform(
view, seed, RankK::kFillModeC, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillSymmetricRandomGaussian(
view, seed, RankK::kFillModeC, 0, 0.5, mantissa_in_bits);
}
else {
EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the RankK workspace
//
tensor_A.resize(problem_size.mk());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename RankK::ElementA>::bits));
EXPECT_TRUE(initialize_symmetric_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename RankK::ElementC>::bits));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename RankK::ElementA(1);
tensor_C.host_view().at({0, 0}) = typename RankK::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
  /// Compares the device result against the host reference
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view());
bool passed = l2_norm < cutlass::MantissaInBits<typename RankK::ElementA>::error;
return passed;
}
  /// Computes the host reference Rank2K result and compares it with the device output
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::reference::host::Rank2KComplex<
typename RankK::ElementA, typename RankK::LayoutA,
typename RankK::ElementC, typename RankK::LayoutC,
ElementCompute, ElementAccumulator
>(
problem_size,
alpha,
tensor_A.host_ref(),
RankK::kTransformA,
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0),
RankK::kFillModeC,
RankK::kBlasMode
);
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename RankK::RankKkernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0
std::cout << "[TestbedRankKUniversal::run()] problem(m, n, k): " << problem_size
<< " alpha: " << ElementCompute(alpha)
<< " beta: " << ElementCompute(beta) << std::endl;
#endif
this->initialize(problem_size);
//
// Initialize the RankK operator
//
typename RankK::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0)
};
RankK rank2k_op;
size_t workspace_size = RankK::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = rank2k_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the RankK
//
status = rank2k_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
//if (true) {
if (!passed) {
std::stringstream fname;
fname << "error_RankK_device_"
<< "fill_mode_c_"
<< (RankK::kFillModeC == cutlass::FillMode::kLower ? "lower_" :
(RankK::kFillModeC == cutlass::FillMode::kUpper ? "upper_" : "invalid_"))
<< "mnk_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< RankK::ThreadblockShape::kM << "x"
<< RankK::ThreadblockShape::kN << "x"
<< RankK::ThreadblockShape::kK << "_"
<< RankK::WarpShape::kM << "x"
<< RankK::WarpShape::kN << "x"
<< RankK::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nD reference:\n" << reference_D.host_view() << "\n"
<< "\nD computed:\n" << tensor_D.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename RankK>
bool TestRank2kUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedRank2KUniversal<RankK> testbed;
using ElementCompute = typename RankK::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
return passed;
}
template <typename RankK>
bool TestAllRankKUniversal() {
bool passed = true;
int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename RankK::ElementA>::value);
int const kAlignmentN = 128 / kMinimumOperandElementSize;
int const kAlignmentK = 128 / kMinimumOperandElementSize;
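  // kAlignmentN / kAlignmentK give the number of elements in a 128-bit access for the
  // smallest operand type; the problem sizes below are derived from them so that every
  // extent is properly aligned.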
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
int problem_size_k[] = {
kAlignmentK,
RankK::ThreadblockShape::kK * RankK::kStages - kAlignmentK,
RankK::ThreadblockShape::kK * RankK::kStages * 3 - kAlignmentK
};
  int batch_counts[] = { // may be interpreted as batch count or split-K slices
1 // Just running one batch for now (removing 2, 3, 5, 7)
};
double problem_alpha[] = {
1.0
};
double problem_beta[] = {
2.0
};
using ElementCompute = typename RankK::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
}
cutlass::gemm::GemmCoord problem_size(n, n, k);
TestbedRank2KUniversal<RankK> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_rank_k_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_rank_k_universal.h",
"repo_id": "test",
"token_count": 6120
} | 53 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
/// F32 <= F16 * I8 + F32 (Upcast on Operand B)
////////////////////////////////////////////////////////////////////////////////
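// The tests below instantiate warp-level tensor-op MMAs whose operands have different
// types (F16/BF16 paired with S8/U8). With OpMultiplyAddMixedInputUpcast the narrower
// integer operand is converted up to the floating-point operand's type before the
// 16x8x16 MMA, and the TransformTestbed verifies the warp-level result.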
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_i8, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = cutlass::half_t;
using ElementB = int8_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_i8, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = cutlass::half_t;
using ElementB = int8_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= I8 * F16 + F32 (Upcast on Operand A)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_i8_f16, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = int8_t;
  using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_i8_f16, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = int8_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= F16 * U8 + F32 (Upcast on Operand B)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_u8, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = cutlass::half_t;
using ElementB = uint8_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_f16_u8, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = cutlass::half_t;
using ElementB = uint8_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= U8 * F16 + F32 (Upcast on Operand A)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_u8_f16, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = uint8_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_u8_f16, 128x128x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = uint8_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<128, 128, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= BF16 * U8 + F32 (Upcast on Operand B)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_bf16_u8, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = cutlass::bfloat16_t;
using ElementB = uint8_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= U8 * BF16 + F32 (Upcast on Operand A)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_u8_bf16, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = uint8_t;
using ElementB = cutlass::bfloat16_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= BF16 * I8 + F32 (Upcast on Operand B)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_bf16_i8, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = cutlass::bfloat16_t;
using ElementB = int8_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////
/// F32 <= I8 * BF16 + F32 (Upcast on Operand A)
////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_mixed_input_tensor_op_crosswise_i8_bf16, 64x64x64_64x64x64_16x8x16) {
using Shape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ElementA = int8_t;
using ElementB = cutlass::bfloat16_t;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementA>::value, 64>;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
cutlass::sizeof_bits<ElementB>::value, 64>;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddMixedInputUpcast>::Type;
test::gemm::warp::TransformTestbed<MmaTensorOp,
cutlass::gemm::GemmShape<64, 64, 64> >()
.run();
}
#endif // if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
| test/unit/gemm/warp/gemm_mixed_input_sm80.cu/0 | {
"file_path": "test/unit/gemm/warp/gemm_mixed_input_sm80.cu",
"repo_id": "test",
"token_count": 5606
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief unit tests for NHWC tensor layout
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/device_memory.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace layout {
void test_nhwc_layout(int n_size, int h_size, int w_size, int c_size) {
int ldc = c_size + 1;
int ldw = ldc * (w_size + 2);
int ldh = ldw * (h_size + 3);
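  // Strides are deliberately padded (+1 channel, +2 in W, +3 in H) so the test covers a
  // non-packed NHWC tensor: offset(n, h, w, c) = c + w * ldc + h * ldw + n * ldh.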
typedef cutlass::layout::TensorNHWC Tensor;
Tensor::Stride tensor_stride({ ldc, ldw, ldh });
Tensor tensor_nhw_packed_c(tensor_stride);
// test pointer offset
for (int n_idx = 0; n_idx < n_size; n_idx++) {
for (int p_idx = 0; p_idx < h_size; p_idx++) {
for (int q_idx = 0; q_idx < w_size; q_idx++) {
for (int c_idx = 0; c_idx < c_size; c_idx++) {
cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, c_idx);
auto ptr_offset = tensor_nhw_packed_c(tensor_coord);
decltype(ptr_offset) reference_offset = c_idx +
q_idx * ldc +
p_idx * ldw +
n_idx * ldh;
EXPECT_EQ(ptr_offset, reference_offset);
}
}
}
}
// test stride
auto stride = tensor_nhw_packed_c.stride();
EXPECT_EQ(stride, tensor_stride);
// test capacity
auto capacity = tensor_nhw_packed_c.capacity(
cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  decltype(capacity) reference_capacity = ldh * n_size;
  EXPECT_EQ(capacity, reference_capacity);
}
__global__ void test_nhwc_inverse(
int *output, int n_size, int h_size, int w_size, int c_size) {
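  // Each thread owns one channel index (threadIdx.x). For every (n, h, w) coordinate it
  // computes the linear offset, maps it back through TensorNHWC::inverse(), and stores the
  // re-computed offset so the host can confirm the round trip is the identity.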
int ldc = c_size;
int ldw = ldc * w_size;
int ldh = ldw * h_size;
typedef cutlass::layout::TensorNHWC Tensor;
Tensor::Stride tensor_stride({ ldc, ldw, ldh });
Tensor tensor_nhw_packed_c(tensor_stride);
for (int n_idx = 0; n_idx < n_size; n_idx++) {
for (int p_idx = 0; p_idx < h_size; p_idx++) {
for (int q_idx = 0; q_idx < w_size; q_idx++) {
cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, threadIdx.x);
int ptr_offset = tensor_nhw_packed_c(tensor_coord);
cutlass::Tensor4DCoord inv_coord = tensor_nhw_packed_c.inverse(ptr_offset);
output[ptr_offset] = tensor_nhw_packed_c(inv_coord);
}
}
}
}
class TestTensorNHWC {
public:
//
// Data members
//
//
// Methods
//
/// Ctor
TestTensorNHWC() {
}
/// Runs the test
void run(int n_size, int h_size, int w_size, int c_size) {
size_t size = n_size * h_size * w_size * c_size;
/// Device memory containing output
cutlass::device_memory::allocation< int > output(size);
int *output_host = (int *)malloc(sizeof(int) * size);
dim3 grid(1,1);
dim3 block(c_size, 1, 1);
test::layout::test_nhwc_inverse<<< grid, block >>>(output.get(),
n_size, h_size, w_size, c_size);
cudaError_t result = cudaDeviceSynchronize();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
//
// Verify output
//
cutlass::device_memory::copy_to_host(output_host, output.get(), size);
result = cudaGetLastError();
ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);
for (int n_idx = 0; n_idx < n_size; n_idx++) {
for (int p_idx = 0; p_idx < h_size; p_idx++) {
for (int q_idx = 0; q_idx < w_size; q_idx++) {
for (int c_idx = 0; c_idx < c_size; c_idx++) {
int reference_offset = c_idx +
q_idx * c_size +
p_idx * (c_size * w_size) +
n_idx * (c_size * w_size * h_size);
EXPECT_EQ(output_host[reference_offset], reference_offset);
}
}
}
}
    free(output_host);
  }
};
} // namespace layout
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Layout_TensorNHWC, NHWC_1_16_8_32) {
int n_size = 1;
int h_size = 16;
int w_size = 8;
int c_size = 32;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
TEST(Layout_TensorNHWC, NHWC_2_16_8_32) {
int n_size = 2;
int h_size = 16;
int w_size = 8;
int c_size = 32;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
TEST(Layout_TensorNHWC, NHWC_2_16_8_128) {
int n_size = 2;
int h_size = 16;
int w_size = 8;
int c_size = 128;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
TEST(Layout_TensorNHWC, NHWC_4_8_16_128) {
int n_size = 4;
int h_size = 8;
int w_size = 16;
int c_size = 128;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/layout/tensor_nhwc.cu/0 | {
"file_path": "test/unit/layout/tensor_nhwc.cu",
"repo_id": "test",
"token_count": 2912
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the PipelineTmaAsync class as it would be used in a Warp specialized loop
*/
#define KERNEL_DBG_TRACE false
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/barrier.h"
#include "cutlass/arch/reg_reconfig.h"
using namespace cute;
using namespace cutlass;
//////////////////// KERNEL /////////////////////////
template <uint32_t Stages>
struct SharedStorage
{
typename cutlass::PipelineTmaAsync<Stages>::SharedStorage storage ;
};
struct KernelParams
{
uint32_t num_iterations;
int* data_ptr;
};
// Goal of this kernel is to complete deadlock-free
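// Thread block layout: 384 threads form three warp groups of 128. Warp group 0 acts as the
// producer and simulates TMA loads by committing transaction bytes into the pipeline, warp
// group 1 acts as the consumer and simulates GMMA work while releasing stages, and warp
// group 2 only deallocates its registers and idles.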
template <typename ClusterShape, uint32_t Stages>
__launch_bounds__(384, 1)
__global__ static
void pipeline_device(KernelParams const kernel_params)
{
extern __shared__ char shared_memory[];
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages>;
using PipelineState = typename cutlass::PipelineState<Stages>;
using SharedStorage = SharedStorage<Stages>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
[[maybe_unused]] auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id
int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / 128, 0);
int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0);
int warp_group_thread_idx = threadIdx.x % 128;
dim3 block_id_in_cluster = cute::block_id_in_cluster();
auto cluster_shape = ClusterShape{};
// #Producers = #RowsInCluster + #ColsInCluster - 1
uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1;
uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers);
uint32_t const per_cta_bytes = sizeof(uint32_t);
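  // Example: for a 2x2 cluster, NumProducers = 2 + 2 - 1 = 3 and
  // TmaTransactionBytes = 3 * sizeof(uint32_t) = 12, i.e. one 4-byte contribution per
  // producing CTA.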
// mbarrier.init
typename MainloopPipeline::Params params;
params.transaction_bytes = TmaTransactionBytes;
if (warp_group_idx == 0) {
params.role = MainloopPipeline::ThreadCategory::Producer;
}
else {
params.role = MainloopPipeline::ThreadCategory::Consumer;
}
params.is_leader = warp_group_thread_idx == 0;
params.num_consumers = 128;
MainloopPipeline pipeline(shared_storage.storage, params, cluster_shape);
__syncthreads();
// Ensure All CTAs in Cluster have completed init before issuing commits
cute::cluster_arrive_relaxed();
cute::cluster_wait();
// Producer WarpGroup
if (warp_group_idx == 0) {
cutlass::arch::warpgroup_reg_alloc<232>();
int lane_predicate = cute::elect_one_sync();
if (warp_idx_in_warpgroup == 0 && lane_predicate) {
int tma_k_prologue = min(Stages, kernel_params.num_iterations);
// Simulating Prologue TMA Loads
// For the DMA (prologue) - we start with an opposite phase - since we skip all waits
// i.e., we know that the buffer is indeed empty
PipelineState smem_pipe_write = make_producer_start_state<MainloopPipeline>();
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < tma_k_prologue; ++i) {
pipeline.producer_acquire(smem_pipe_write);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(smem_pipe_write, per_cta_bytes);
++smem_pipe_write;
}
int tma_k_iter = kernel_params.num_iterations - tma_k_prologue;
// Simulating Mainloop TMA Loads
CUTE_NO_UNROLL
for ( ; tma_k_iter > 0; --tma_k_iter) {
pipeline.producer_acquire(smem_pipe_write);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(smem_pipe_write, per_cta_bytes);
// Advance write stage
++smem_pipe_write;
}
// Tail Loop
// Handles the case where we never enter the mainloop
PipelineState tail = tma_k_prologue == Stages ? smem_pipe_write : PipelineState{};
for ( int i = 0; i < tma_k_prologue; ++i) {
pipeline.producer_acquire(tail);
++tail;
}
}
// Consumer WarpGroup
} else if(warp_group_idx == 1) {
cutlass::arch::warpgroup_reg_alloc<232>();
PipelineState smem_pipe_read;
PipelineState smem_pipe_release;
// simulates accumulators + extra reg. pressure
int arr[168];
// Init Shared Memory read stages & PhaseBit
static constexpr uint32_t K_PIPE_MMAS = 1;
static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight");
// Total number of gemm iterations
auto gemm_k_iterations = kernel_params.num_iterations;
// Simulating Prologue MMAs
int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < mma_k_prologue; ++iter) {
pipeline.consumer_wait(smem_pipe_read);
warpgroup_arrive();
// GMMA would typically happen here
++smem_pipe_read;
}
gemm_k_iterations -= mma_k_prologue;
// Simulating Mainloop MMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; gemm_k_iterations > 0; --gemm_k_iterations) {
/// Wait on the smem_pipe_read stage / phase
pipeline.consumer_wait(smem_pipe_read);
warpgroup_arrive();
// GMMA would typically happen here
// Dummy op - which will never happen
// But simulates high register usage.
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 256){
arr[i] += kernel_params.data_ptr[i];
}
}
pipeline.consumer_release(smem_pipe_release);
// Advance stages
++smem_pipe_read;
++smem_pipe_release;
}
// Dummy op - which will never happen
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 256){
kernel_params.data_ptr[i] = arr[i];
}
}
// Tail Loop
for (int i = 0; i < K_PIPE_MMAS; ++i){
pipeline.consumer_release(smem_pipe_release);
++smem_pipe_release;
}
// Warp-Group #2
} else {
cutlass::arch::warpgroup_reg_dealloc<40>();
}
}
/////////////////////////////////////////////////////
/// Test harness that launches the warp-specialized TMA pipeline kernel above
template<uint32_t Stages_, typename ClusterShape_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t kBlockSize = 128 * 3;
using ClusterShape = ClusterShape_;
//
// Methods
//
// Ctor
PipelineTest(){};
  // Launch the pipeline kernel and measure its elapsed time
cudaError_t run(uint32_t const kNumIters,
cudaStream_t stream = 0) {
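    // Creates CUDA events for timing, raises the kernel's dynamic shared memory limit,
    // launches a single cluster (ClusterShape CTAs with kBlockSize threads each) via
    // ClusterLauncher, then synchronizes and measures the elapsed time.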
float elapsed_ms = 0.0f;
// Pipeline (multistage pipeline)
[[maybe_unused]] auto num_stages = Int<Stages>{};
auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{};
//
// Configure and launch
//
int iterations = 1;
cudaEvent_t events[2];
cudaError_t result;
for (cudaEvent_t & event : events) {
result = cudaEventCreate(&event);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to create event.";
return result;
}
}
result = cudaEventRecord(events[0]);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to record start event.";
return result;
}
for (int iter = 0; iter < iterations; ++iter) {
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages>;
int smem_size = int(sizeof(SharedStorage<Stages>));
result = cudaFuncSetAttribute(
pipeline_device<decltype(cluster_shape), Stages>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
// Launch a single Cluster, with kBlockSize threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(kBlockSize,1,1);
const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>;
KernelParams params{kNumIters, nullptr};
void* kernel_params[] = {reinterpret_cast<void*>(¶ms)};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
}
result = cudaEventRecord(events[1]);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to record stop event.";
return result;
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
return result;
}
result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]);
if (result != cudaSuccess) {
std::cerr << "Failed to create event.";
return result;
}
for (cudaEvent_t & event : events) {
(void)cudaEventDestroy(event);
}
return cudaSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x1_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster1x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster2x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS, Cluster4x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
| test/unit/pipeline/pipeline_tma_async_warp_specialized.cu/0 | {
"file_path": "test/unit/pipeline/pipeline_tma_async_warp_specialized.cu",
"repo_id": "test",
"token_count": 6453
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
\file
\brief Defines a data structure in which a set of functionally equivalent library::Operation
instances may be queried.
*/
#pragma once
#include <fstream>
#include <iosfwd>
#include <unordered_map>
#include <algorithm>
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/util.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Data Structures for Gemm Functional Maps
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tuple uniquely identifying Gemm functional behavior
struct GemmFunctionalKey {
Provider provider;
GemmKind gemm_kind;
NumericTypeID element_compute;
NumericTypeID element_scalar;
NumericTypeID element_A;
LayoutTypeID layout_A;
ComplexTransform transform_A;
NumericTypeID element_B;
LayoutTypeID layout_B;
ComplexTransform transform_B;
NumericTypeID element_C;
LayoutTypeID layout_C;
NumericTypeID element_D;
LayoutTypeID layout_D;
//
// Methods
//
inline
GemmFunctionalKey(
Provider provider,
GemmKind gemm_kind = GemmKind::kGemm,
NumericTypeID element_compute = NumericTypeID::kF32,
NumericTypeID element_scalar = NumericTypeID::kF32,
NumericTypeID element_A = NumericTypeID::kF16,
LayoutTypeID layout_A = LayoutTypeID::kColumnMajor,
ComplexTransform transform_A = ComplexTransform::kNone,
NumericTypeID element_B = NumericTypeID::kF16,
LayoutTypeID layout_B = LayoutTypeID::kColumnMajor,
ComplexTransform transform_B = ComplexTransform::kNone,
NumericTypeID element_C = NumericTypeID::kF16,
LayoutTypeID layout_C = LayoutTypeID::kColumnMajor,
NumericTypeID element_D = NumericTypeID::kF16,
LayoutTypeID layout_D = LayoutTypeID::kColumnMajor
):
provider(provider),
gemm_kind(gemm_kind),
element_compute(element_compute),
element_scalar(element_scalar),
element_A(element_A),
layout_A(layout_A),
transform_A(transform_A),
element_B(element_B),
layout_B(layout_B),
transform_B(transform_B),
element_C(element_C),
layout_C(layout_C),
element_D(element_D),
layout_D(layout_D)
{ }
inline
bool operator==(GemmFunctionalKey const &rhs) const {
return
(provider == rhs.provider) &&
(gemm_kind == rhs.gemm_kind) &&
(element_compute == rhs.element_compute) &&
(element_scalar == rhs.element_scalar) &&
(element_A == rhs.element_A) &&
(layout_A == rhs.layout_A) &&
(transform_A == rhs.transform_A) &&
(element_B == rhs.element_B) &&
(layout_B == rhs.layout_B) &&
(transform_B == rhs.transform_B) &&
(element_C == rhs.element_C) &&
(layout_C == rhs.layout_C) &&
(element_D == rhs.element_D) &&
(layout_D == rhs.layout_D);
}
inline
bool operator!=(GemmFunctionalKey const &rhs) const {
return !(*this == rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k) {
out << "{\n"
<< " provider: " << to_string(k.provider) << "\n"
<< " gemm_kind: " << to_string(k.gemm_kind) << "\n"
<< " element_compute: " << to_string(k.element_compute) << "\n"
<< " element_scalar: " << to_string(k.element_scalar) << "\n"
<< " element_A: " << to_string(k.element_A) << "\n"
<< " layout_A: " << to_string(k.layout_A) << "\n"
<< " transform_A: " << to_string(k.transform_A) << "\n"
<< " element_B: " << to_string(k.element_B) << "\n"
<< " layout_B: " << to_string(k.layout_B) << "\n"
<< " transform_B: " << to_string(k.transform_B) << "\n"
<< " element_C: " << to_string(k.element_C) << "\n"
<< " layout_C: " << to_string(k.layout_C) << "\n"
<< " element_D: " << to_string(k.element_D) << "\n"
<< " layout_D: " << to_string(k.layout_D) << "\n"
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Hash function for GemmFunctionalKey
struct GemmFunctionalKeyHasher {
using IntHash = std::hash<int>;
inline
static size_t rotl(size_t key, int shl) {
return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl)));
}
inline
size_t operator()(GemmFunctionalKey const &key) const {
IntHash hash;
return
rotl(hash(int(key.provider)), 1) ^
rotl(hash(int(key.gemm_kind)), 2) ^
rotl(hash(int(key.element_compute)), 3) ^
rotl(hash(int(key.element_scalar)), 4) ^
rotl(hash(int(key.element_A)), 5) ^
rotl(hash(int(key.layout_A)), 6) ^
rotl(hash(int(key.transform_A)), 7) ^
rotl(hash(int(key.element_B)), 8) ^
rotl(hash(int(key.layout_B)), 9) ^
rotl(hash(int(key.transform_B)), 10) ^
rotl(hash(int(key.element_C)), 11) ^
rotl(hash(int(key.layout_C)), 12) ^
rotl(hash(int(key.element_D)), 13) ^
rotl(hash(int(key.layout_D)), 14);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes a partial ordering to search for GEMM operators
struct GemmPreferenceKey {
int compute_capability;
int alignment;
//
// Methods
//
GemmPreferenceKey(): compute_capability(), alignment() { }
GemmPreferenceKey(int cc, int alignment): compute_capability(cc), alignment(alignment) { }
bool operator<(GemmPreferenceKey const &rhs) const {
return (compute_capability < rhs.compute_capability) ||
((compute_capability == rhs.compute_capability) && (alignment < rhs.alignment));
}
bool operator==(GemmPreferenceKey const &rhs) const {
return compute_capability == rhs.compute_capability;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream& operator<< (std::ostream& out, const cutlass::library::GemmPreferenceKey& key) {
out << "{\n"
<< "compute_capability : " << key.compute_capability << std::endl
<< "alignment : " << key.alignment << std::endl
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Maps a preference key (minimum compute capability, alignment) onto a vector of possible operations
using GemmOperationVectorMap = std::map<
GemmPreferenceKey,
std::vector<Operation const *>
>;
/// Maps a GemmFunctionalKey onto a vector of Operation * objects expected to be of kind kGemm
using GemmOperationFunctionalMap = std::unordered_map<
GemmFunctionalKey,
GemmOperationVectorMap,
GemmFunctionalKeyHasher
>;
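// A minimal lookup sketch (illustration only, not part of this header): operation
// selection is an exact unordered_map find() on GemmFunctionalKey followed by an
// ordered search over GemmPreferenceKey for the best entry whose minimum compute
// capability does not exceed the device's. The function name and key values below
// are assumptions chosen for the example.
#if 0
inline Operation const * find_gemm_operation_sketch(
  GemmOperationFunctionalMap const &table,
  int device_compute_capability) {

  // Functional key: f16 column-major A/B/C/D with f32 compute and scalar types
  GemmFunctionalKey functional_key(
    Provider::kCUTLASS, GemmKind::kGemm,
    NumericTypeID::kF32, NumericTypeID::kF32,                                  // compute, scalar
    NumericTypeID::kF16, LayoutTypeID::kColumnMajor, ComplexTransform::kNone,  // A
    NumericTypeID::kF16, LayoutTypeID::kColumnMajor, ComplexTransform::kNone,  // B
    NumericTypeID::kF16, LayoutTypeID::kColumnMajor,                           // C
    NumericTypeID::kF16, LayoutTypeID::kColumnMajor);                          // D

  auto functional_it = table.find(functional_key);
  if (functional_it == table.end()) {
    return nullptr;
  }

  // upper_bound() yields the first entry greater than the query preference,
  // so stepping back selects the largest preference key not exceeding it.
  GemmPreferenceKey preference_key(device_compute_capability, /*alignment*/ 8);
  auto preference_it = functional_it->second.upper_bound(preference_key);
  if (preference_it == functional_it->second.begin()) {
    return nullptr;
  }
  --preference_it;
  return preference_it->second.empty() ? nullptr : preference_it->second.front();
}
#endif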
/////////////////////////////////////////////////////////////////////////////////////////////////
// Data Structures for Conv Functional Maps
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tuple uniquely identifying conv2d functional behavior
struct ConvFunctionalKey {
library::Provider provider;
library::ConvKind conv_kind;
library::NumericTypeID element_A;
library::LayoutTypeID layout_A;
library::NumericTypeID element_B;
library::LayoutTypeID layout_B;
library::NumericTypeID element_C;
library::LayoutTypeID layout_C;
library::NumericTypeID element_accumulator;
library::NumericTypeID element_compute;
//
// Methods
//
inline
ConvFunctionalKey(
library::Provider provider = library::Provider::kInvalid,
library::ConvKind conv_kind = library::ConvKind::kFprop,
library::NumericTypeID element_A = library::NumericTypeID::kF16,
library::LayoutTypeID layout_A = library::LayoutTypeID::kTensorNHWC,
library::NumericTypeID element_B = library::NumericTypeID::kF16,
library::LayoutTypeID layout_B = library::LayoutTypeID::kTensorNHWC,
library::NumericTypeID element_C = library::NumericTypeID::kF16,
library::LayoutTypeID layout_C = library::LayoutTypeID::kTensorNHWC,
library::NumericTypeID element_accumulator = library::NumericTypeID::kF32,
library::NumericTypeID element_compute = library::NumericTypeID::kF32
):
provider(provider),
conv_kind(conv_kind),
element_A(element_A),
layout_A(layout_A),
element_B(element_B),
layout_B(layout_B),
element_C(element_C),
layout_C(layout_C),
element_accumulator(element_accumulator),
element_compute(element_compute)
{ }
inline
bool operator==(ConvFunctionalKey const &rhs) const {
return
(provider == rhs.provider) &&
(conv_kind == rhs.conv_kind) &&
(element_A == rhs.element_A) &&
(layout_A == rhs.layout_A) &&
(element_B == rhs.element_B) &&
(layout_B == rhs.layout_B) &&
(element_C == rhs.element_C) &&
(layout_C == rhs.layout_C) &&
(element_accumulator == rhs.element_accumulator) &&
(element_compute == rhs.element_compute);
}
inline
bool operator!=(ConvFunctionalKey const &rhs) const {
return !(*this == rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream& operator<< (std::ostream& out, const cutlass::library::ConvFunctionalKey& key) {
out << "{\n"
<< "provider: " << to_string(key.provider) << std::endl
<< "conv_kind: " << to_string(key.conv_kind) << std::endl
<< "element_A: " << to_string(key.element_A) << std::endl
<< "layout_A: " << to_string(key.layout_A) << std::endl
<< "element_B: " << to_string(key.element_B) << std::endl
<< "layout_B: " << to_string(key.layout_B) << std::endl
<< "element_C: " << to_string(key.element_C) << std::endl
<< "layout_C: " << to_string(key.layout_C) << std::endl
<< "element_accumulator: " << to_string(key.element_accumulator) << std::endl
<< "element_compute: " << to_string(key.element_compute) << std::endl
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
struct ConvFunctionalKeyHasher {
using IntHash = std::hash<int>;
inline
static size_t rotl(size_t key, int shl) {
return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl)));
}
inline
size_t operator()(ConvFunctionalKey const &key) const {
IntHash hash;
return
rotl(hash(int(key.provider)), 1) ^
rotl(hash(int(key.conv_kind)), 2) ^
rotl(hash(int(key.element_A)), 3) ^
rotl(hash(int(key.layout_A)), 4) ^
rotl(hash(int(key.element_B)), 5) ^
rotl(hash(int(key.layout_B)), 6) ^
rotl(hash(int(key.element_C)), 7) ^
rotl(hash(int(key.layout_C)), 8) ^
rotl(hash(int(key.element_accumulator)), 9) ^
rotl(hash(int(key.element_compute)), 10);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes a partial ordering to search for Conv2d operators
struct ConvPreferenceKey {
int compute_capability;
IteratorAlgorithmID iterator_algorithm;
//
// Methods
//
ConvPreferenceKey(): compute_capability(), iterator_algorithm() { }
ConvPreferenceKey(int cc, IteratorAlgorithmID iterator_algorithm):
compute_capability(cc), iterator_algorithm(iterator_algorithm) { }
bool operator<(ConvPreferenceKey const &rhs) const {
return (compute_capability < rhs.compute_capability) ||
((compute_capability == rhs.compute_capability) && (iterator_algorithm < rhs.iterator_algorithm));
}
bool operator==(ConvPreferenceKey const &rhs) const {
return (compute_capability == rhs.compute_capability) &&
(iterator_algorithm == rhs.iterator_algorithm);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Maps a preference key (minimum compute capability, iterator algorithm) onto a vector of possible operations
using ConvOperationVectorMap = std::map<
ConvPreferenceKey,
std::vector<Operation const *>
>;
/// Maps a ConvFunctionalKey onto a vector of Operation * objects expected to be of kind kConv2d or kConv3d
using ConvOperationFunctionalMap = std::unordered_map<
ConvFunctionalKey,
ConvOperationVectorMap,
ConvFunctionalKeyHasher
>;
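// Conv lookup follows the same two-level pattern; a minimal sketch for illustration only
// (assumes IteratorAlgorithmID::kOptimized is a valid enumerator and relies on the
// ConvFunctionalKey constructor defaults for the remaining fields).
#if 0
inline Operation const * find_conv2d_operation_sketch(
  ConvOperationFunctionalMap const &table,
  int device_compute_capability) {

  ConvFunctionalKey functional_key(Provider::kCUTLASS, ConvKind::kFprop);

  auto functional_it = table.find(functional_key);
  if (functional_it == table.end()) {
    return nullptr;
  }

  ConvPreferenceKey preference_key(device_compute_capability, IteratorAlgorithmID::kOptimized);
  auto preference_it = functional_it->second.find(preference_key);
  if (preference_it == functional_it->second.end() || preference_it->second.empty()) {
    return nullptr;
  }
  return preference_it->second.front();
}
#endif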
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tuple uniquely identifying reduction functional behavior
struct ReductionFunctionalKey {
library::Provider provider;
library::NumericTypeID element_workspace;
library::NumericTypeID element_accumulator;
library::NumericTypeID element_output;
library::NumericTypeID element_compute;
library::MathOperationID reduce_math_op;
library::EpilogueKind epilogue_math_op;
//
// Methods
//
inline
ReductionFunctionalKey(
library::Provider provider = library::Provider::kInvalid,
library::NumericTypeID element_workspace = library::NumericTypeID::kF16,
library::NumericTypeID element_accumulator = library::NumericTypeID::kF32,
library::NumericTypeID element_output = library::NumericTypeID::kF16,
library::NumericTypeID element_compute = library::NumericTypeID::kF32,
library::MathOperationID reduce_math_op = library::MathOperationID::kAdd,
library::EpilogueKind epilogue_math_op = library::EpilogueKind::kLinearCombination
):
provider(provider),
element_workspace(element_workspace),
element_accumulator(element_accumulator),
element_output(element_output),
element_compute(element_compute),
reduce_math_op(reduce_math_op),
epilogue_math_op(epilogue_math_op)
{ }
inline
bool operator==(ReductionFunctionalKey const &rhs) const {
return
(provider == rhs.provider) &&
(element_workspace == rhs.element_workspace) &&
(element_accumulator == rhs.element_accumulator) &&
(element_output == rhs.element_output) &&
(element_compute == rhs.element_compute) &&
(reduce_math_op == rhs.reduce_math_op) &&
(epilogue_math_op == rhs.epilogue_math_op);
}
inline
bool operator!=(ReductionFunctionalKey const &rhs) const {
return !(*this == rhs);
}
};
struct ReductionFunctionalKeyHasher {
using IntHash = std::hash<int>;
inline
static size_t rotl(size_t key, int shl) {
return (key << shl) | (key >> (sizeof(key)*8u - static_cast<size_t>(shl)));
}
inline
size_t operator()(ReductionFunctionalKey const &key) const {
IntHash hash;
return
rotl(hash(int(key.provider)), 1) ^
rotl(hash(int(key.element_workspace)), 2) ^
rotl(hash(int(key.element_accumulator)), 3) ^
rotl(hash(int(key.element_output)), 4) ^
rotl(hash(int(key.element_compute)), 5) ^
rotl(hash(int(key.reduce_math_op)), 6) ^
rotl(hash(int(key.epilogue_math_op)), 7);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
inline
std::ostream& operator<< (std::ostream& out, const ReductionFunctionalKey& key) {
out << "{\n"
<< "provider: " << library::to_string(key.provider) << std::endl
<< "element_workspace : " << library::to_string(key.element_workspace) << std::endl
<< "element_accumulator : " << library::to_string(key.element_accumulator) << std::endl
<< "element_output : " << library::to_string(key.element_output) << std::endl
<< "element_compute : " << library::to_string(key.element_compute) << std::endl
<< "}";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// ReductionOperationFunctionalMap has NO preference key and a single instance per functional key
// i.e. only one tile size configuration per functional key
using ReductionOperationFunctionalMap = std::unordered_map<
ReductionFunctionalKey,
library::Operation const *,
ReductionFunctionalKeyHasher
>;
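// Because there is no preference key, a reduction lookup is a single find(); a minimal
// sketch for illustration only (the element types below are assumptions).
#if 0
inline Operation const * find_reduction_operation_sketch(
  ReductionOperationFunctionalMap const &table) {

  ReductionFunctionalKey key(
    Provider::kCUTLASS,
    NumericTypeID::kF32,   // element workspace
    NumericTypeID::kF32,   // element accumulator
    NumericTypeID::kF16,   // element output
    NumericTypeID::kF32);  // element compute

  auto it = table.find(key);
  return (it == table.end()) ? nullptr : it->second;
}
#endif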
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Table of cutlass::library::Operation instances
class OperationTable {
public:
/// Map of all operations of type kGemm
// provider (kCUTLASS)
GemmOperationFunctionalMap gemm_operations;
/// Map of all operations of type kConv2d
// provider (kCUTLASS, kReferenceHost, kReferenceDevice)
ConvOperationFunctionalMap conv2d_operations;
/// Map of all operations of type kConv3d
// provider (kCUTLASS, kReferenceHost, kReferenceDevice)
ConvOperationFunctionalMap conv3d_operations;
/// Map of all operations of type kReduction
// provider (kCUTLASS)
ReductionOperationFunctionalMap reduction_operations;
public:
void append(Manifest const &manifest);
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
std::ostream & operator<<(std::ostream &out, cutlass::library::GemmFunctionalKey const &k);
| tools/library/include/cutlass/library/operation_table.h/0 | {
"file_path": "tools/library/include/cutlass/library/operation_table.h",
"repo_id": "tools",
"token_count": 6529
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for reduction operation in CUTLASS Library.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "reduction_operation.h"
namespace cutlass {
namespace library {
// naming convention initialize_reduce_[ReductionOp]_[EpilogueOp]_[ElementWorkspace]_[ElementAccumulator]_[ElementOutput]
void initialize_reduce_add_linear_combination_f16_f16_f16(Manifest &manifest) {
using ElementWorkspace = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f16_f16_f16 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f16_f16_f16>(
"reduce_add_linear_combination_f16_f16_f16"
));
}
void initialize_reduce_add_linear_combination_f32_f32_f16(Manifest &manifest) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
using ElementCompute = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f32_f32_f16 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f32_f32_f16>(
"reduce_add_linear_combination_f32_f32_f16"
));
}
void initialize_reduce_add_linear_combination_f32_f32_bf16(Manifest &manifest) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::bfloat16_t;
using ElementCompute = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f32_f32_bf16 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f32_f32_bf16>(
"reduce_add_linear_combination_f32_f32_bf16"
));
}
void initialize_reduce_add_linear_combination_f32_f32_f32(Manifest &manifest) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
using ElementCompute = float;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f32_f32_f32 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f32_f32_f32>(
"reduce_add_linear_combination_f32_f32_f32"
));
}
void initialize_reduce_add_linear_combination_f64_f64_f64(Manifest &manifest) {
using ElementWorkspace = double;
using ElementAccumulator = double;
using ElementOutput = double;
using ElementCompute = double;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_f64_f64_f64 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_f64_f64_f64>(
"reduce_add_linear_combination_f64_f64_f64"
));
}
void initialize_reduce_add_linear_combination_cf32_cf32_cf32(Manifest &manifest) {
using ElementWorkspace = cutlass::complex<float>;
using ElementAccumulator = cutlass::complex<float>;
using ElementOutput = cutlass::complex<float>;
using ElementCompute = cutlass::complex<float>;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
128 / cutlass::sizeof_bits<ElementWorkspace>::value,
ElementAccumulator,
ElementCompute
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using Operation_reduce_add_linear_combination_cf32_cf32_cf32 = cutlass::reduction::device::ReduceSplitK<
cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>
>;
manifest.append(new ReductionOperation<
Operation_reduce_add_linear_combination_cf32_cf32_cf32>(
"reduce_add_linear_combination_cf32_cf32_cf32"
));
}
} // namespace library
} // namespace cutlass
| tools/library/src/reduction/reduction_device.cu/0 | {
"file_path": "tools/library/src/reduction/reduction_device.cu",
"repo_id": "tools",
"token_count": 2880
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Device memory allocation for the profiler's execution environment
*/
#pragma once
#include <stdexcept>
#include <list>
#include <vector>
#include "cutlass/library/library.h"
#include "cutlass/util/distribution.h"
#include "enumerated_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Device memory allocation
class DeviceAllocation {
private:
/// Data type of contained elements
library::NumericTypeID type_;
/// Stride (in elements) between the start of adjacent tensors in a batch
size_t batch_stride_;
/// Capacity in elements of device allocation
size_t capacity_;
/// Pointer to device memory
void *pointer_;
/// Layout type ID
library::LayoutTypeID layout_;
/// Stride vector
std::vector<int64_t> stride_;
/// Extent vector
std::vector<int> extent_;
/// Support allocating a 'batch' of non-overlapping tensors in contiguous memory
int batch_count_;
/// Buffer holding TensorRef instance to recently allocated memory
std::vector<uint8_t> tensor_ref_buffer_;
public:
//
// Static member functions
//
/// Determines the number of bytes needed to store 'capacity' elements of the given numeric type
static size_t bytes(library::NumericTypeID type, size_t capacity);
/// Returns the stride of a packed layout
static std::vector<int64_t> get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent);
/// Constructs a layout in the provided buffer and returns the capacity (in elements) needed
static size_t construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride);
/// Returns true if two blocks have exactly the same value
static bool block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity);
/// Returns true if two blocks have approximately the same value
static bool block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor);
public:
//
// Methods
//
DeviceAllocation();
DeviceAllocation(library::NumericTypeID type, size_t capacity);
DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride = std::vector<int64_t>(),
int batch_count = 1);
~DeviceAllocation();
DeviceAllocation &reset();
/// Allocates device memory of a given type and capacity
DeviceAllocation &reset(library::NumericTypeID type, size_t capacity);
/// Allocates memory for a given layout and tensor
DeviceAllocation &reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride = std::vector<int64_t>(),
int batch_count = 1);
/// Returns a buffer owning the tensor reference
std::vector<uint8_t> &tensor_ref() {
return tensor_ref_buffer_;
}
bool good() const;
/// Data type of contained elements
library::NumericTypeID type() const;
/// Pointer to start of device memory allocation
void *data() const;
/// Pointer to the first element of a batch
void *batch_data(int batch_idx) const;
/// Gets the layout type
library::LayoutTypeID layout() const;
/// Gets the stride vector
std::vector<int64_t> const & stride() const;
/// Gets the extent vector
std::vector<int> const & extent() const;
/// Gets the number of adjacent tensors in memory
int batch_count() const;
/// Gets the stride (in units of elements) between items
int64_t batch_stride() const;
/// Gets the stride (in units of bytes) between items
int64_t batch_stride_bytes() const;
/// Capacity of allocation in number of elements
size_t capacity() const;
/// Capacity of allocation in bytes
size_t bytes() const;
/// Initializes a device allocation to a random distribution using cuRAND
void initialize_random_device(int seed, Distribution dist);
/// Initializes a host allocation to a random distribution using host-side random number generation
void initialize_random_host(int seed, Distribution dist);
/// Initializes a device allocation to a sequential distribution
void initialize_sequential_device(Distribution dist);
/// Initializes a host allocation to a sequential distribution
void initialize_sequential_host(Distribution dist);
/// Initializes a device allocation to a random distribution using cuRAND
void initialize_random_sparsemeta_device(int seed, int MetaSizeInBits);
/// Initializes a host allocation of sparse metadata to a random distribution using host-side random number generation
void initialize_random_sparsemeta_host(int seed, int MetaSizeInBits);
/// Uniformly fills a device allocation with a value when provided, otherwise zero
void fill_device(double value);
/// Uniformly fills a host allocation with a value when provided, otherwise zero
void fill_host(double value);
/// Copies from an equivalent-sized tensor in device memory
void copy_from_device(void const *ptr);
/// Copies from an equivalent-sized buffer in host memory
void copy_from_host(void const *ptr);
/// Copies the contents to an equivalent-sized buffer in host memory
void copy_to_host(void *ptr);
/// Writes a tensor to csv
void write_tensor_csv(std::ostream &out);
};
using DeviceAllocationList = std::list<DeviceAllocation>;
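// A minimal usage sketch (illustration only; assumes cutlass::Distribution exposes a
// set_uniform(min, max) helper and that the tensor extents chosen here are arbitrary).
#if 0
inline void device_allocation_example(std::ostream &out) {
  // Rank-4 NHWC activation tensor of half-precision elements
  DeviceAllocation tensor(
    library::NumericTypeID::kF16,
    library::LayoutTypeID::kTensorNHWC,
    {1, 16, 16, 64});

  Distribution dist;
  dist.set_uniform(-1.0, 1.0);
  tensor.initialize_random_device(/*seed*/ 2024, dist);

  // Mirror the contents on the host and dump as CSV
  std::vector<uint8_t> host_buffer(tensor.bytes());
  tensor.copy_to_host(host_buffer.data());
  tensor.write_tensor_csv(out);
}
#endif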
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/include/cutlass/profiler/device_allocation.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/device_allocation.h",
"repo_id": "tools",
"token_count": 2109
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Convolution 2D profiling
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/conv2d_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cutlass::library;
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kConv2d,
{
{ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"},
{ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"},
{ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"},
{ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"},
{ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"},
{ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"},
{ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"},
{ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"},
{ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"},
{ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"},
{ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"},
{ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"},
{ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"},
{ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"},
},
{ library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN }
) {
description_ = " Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)";
}
/// Destructor
Conv2dOperationProfiler::~Conv2dOperationProfiler() {
}
/// Prints usage statement for the math function
void Conv2dOperationProfiler::print_usage(std::ostream &out) const {
out << "Conv2d" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void Conv2dOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular convolution (specify all the convolution parameters):\n"
<< " $ cutlass_profiler --operation=Conv2d"
" --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32"
" --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3"
" --pad_h=1 --pad_w=1"
" --stride_h=1 --stride_w=1"
" --dilation_h=1 --dilation_w=1\n\n";
}
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Total number of bytes loaded
int64_t Conv2dOperationProfiler::Conv2dProblem::bytes(
library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes_ =
int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
}
return bytes_;
}
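// Worked example: for an equivalent fprop GEMM of (M, N, K) = (6272, 64, 72) with f16
// A, B, and C (the n=32, h=w=14, c=8, k=64, r=s=3 problem used in print_examples above),
// the modeled traffic is 2 * (6272*72 + 64*72 + 6272*64) bytes, roughly 1.6 MiB, plus
// another 2 * 6272 * 64 bytes of C reads when beta is non-zero.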
/// Total number of flops computed
int64_t Conv2dOperationProfiler::Conv2dProblem::flops(
library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2;
int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2;
// Adjust mainloop flop for dgrad strided
if (operation_desc.conv_kind == library::ConvKind::kDgrad) {
flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w);
}
int64_t flops_total_ = flops_mainloop_ + flops_epilogue_;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_total_ *=4;
break;
default: break;
}
return flops_total_;
}
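// Worked example (same problem as above, assuming the usual implicit-GEMM fprop mapping
// M = N*P*Q, N = K, K = R*S*C, i.e. (6272, 64, 72)):
//   flops_mainloop = 2 * 6272 * 64 * 72 = 57,802,752
//   flops_epilogue = 2 * 6272 * 64     =    802,816
// for a total of roughly 58.6 MFLOP per pass over the problem.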
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status Conv2dOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(operation->description());
if (!arg_as_int(problem_.n, "n", problem_space, problem)) {
// default value
problem_.n = 1;
}
if (!arg_as_int(problem_.h, "h", problem_space, problem)) {
// default value
problem_.h = 16;
}
if (!arg_as_int(problem_.w, "w", problem_space, problem)) {
// default value
problem_.w = 16;
}
if (!arg_as_int(problem_.c, "c", problem_space, problem)) {
// default value
problem_.c = 64;
}
if (!arg_as_int(problem_.k, "k", problem_space, problem)) {
// default value
problem_.k = 64;
}
if (!arg_as_int(problem_.r, "r", problem_space, problem)) {
// default value
problem_.r = 3;
}
if (!arg_as_int(problem_.s, "s", problem_space, problem)) {
// default value
problem_.s = 3;
}
if (!arg_as_int(problem_.groups, "g", problem_space, problem)) {
// default value
problem_.groups = 1;
}
if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) {
// default value
problem_.pad_h = 1;
}
if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) {
// default value
problem_.pad_w = 1;
}
if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) {
// default value
problem_.stride_h = 1;
}
if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) {
// default value
problem_.stride_w = 1;
}
if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) {
// default value
problem_.dilation_h = 1;
}
if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) {
// default value
problem_.dilation_w = 1;
}
//////////////////////// Convolution output dimensions p and q ////////////////////////
// Cutlass convolutions support arbitrary output sizes and not constrained by //
// input, filter, padding, striding, dilation sizes. //
// cuDNN sets the output dimensions (p, q) using following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where; div_up(a, b) : (a - 1)/b + 1 //
// //
// Thus, when output p and q dimensions are unspecified by the user //
// cutlass profiler sets p and q which are cuDNN compliant. //
// //
////////////////////////////////////////////////////////////////////////////////////////
// set convolution output p
if (!arg_as_int(problem_.p, "p", problem_space, problem)) {
// default value (set using cudnn formula for output height, when p is not provided)
problem_.p = (
problem_.h +
2 * problem_.pad_h -
((problem_.r - 1) * problem_.dilation_h + 1)
) / (problem_.stride_h)
+ 1;
}
// set convolution output q
if (!arg_as_int(problem_.q, "q", problem_space, problem)) {
// default value (set using cudnn formula for output width, when q is not provided)
problem_.q = (
problem_.w +
2 * problem_.pad_w -
((problem_.s - 1) * problem_.dilation_w + 1)
) / (problem_.stride_w)
+ 1;
}
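  // Worked example of the default output size: with h=14, pad_h=1, r=3, dilation_h=1,
  // and stride_h=1, the expression gives p = (14 + 2 - ((3 - 1)*1 + 1)) / 1 + 1 = 14,
  // matching the cuDNN convention described above.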
/////////////////////////////////////////////////////////////////////////////////////////
if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) {
// default value
problem_.split_k_mode = library::SplitKMode::kSerial;
}
if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
problem_.split_k_slices = 1;
}
if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) {
// default value
problem_.conv_mode = library::ConvModeID::kCrossCorrelation;
}
if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) {
// default value
problem_.eq_gemm_provider = library::Provider::kNone;
}
if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
problem_.alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
problem_.beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
// initialize library::Conv2dConfiguration
conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize(
int(problem_.n),
int(problem_.h),
int(problem_.w),
int(problem_.c),
int(problem_.k),
int(problem_.r),
int(problem_.s),
int(problem_.p),
int(problem_.q),
int(problem_.pad_h),
int(problem_.pad_w),
int(problem_.stride_h),
int(problem_.stride_w),
int(problem_.dilation_h),
int(problem_.dilation_w),
static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)),
int(problem_.split_k_slices),
int(problem_.groups)
);
conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode));
conv_workspace_.set_stride_vector(
problem_, operation_desc.conv_kind, operation_desc.A.layout,
operation_desc.B.layout, operation_desc.C.layout);
// initialize library::ConvArguments
conv_workspace_.arguments.A = nullptr;
conv_workspace_.arguments.B = nullptr;
conv_workspace_.arguments.C = nullptr;
conv_workspace_.arguments.D = nullptr;
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// initialize reduction operation for parallel splitKMode
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) {
return Status::kErrorInternal;
}
}
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments);
}
/// Initializes the performance result
void Conv2dOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::ConvDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
result.arguments.resize(problem_space.rank());
set_argument(result, "Activation", problem_space,
std::string(library::to_string(operation_desc.activation().element))
+ ":" + library::to_string(operation_desc.activation().layout));
set_argument(result, "Filter", problem_space,
std::string(library::to_string(operation_desc.filter().element))
+ ":" + library::to_string(operation_desc.filter().layout));
set_argument(result, "Output", problem_space,
std::string(library::to_string(operation_desc.output().element))
+ ":" + library::to_string(operation_desc.output().layout));
set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind));
set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm)));
set_argument(result, "n", problem_space, problem_.n);
set_argument(result, "h", problem_space, problem_.h);
set_argument(result, "w", problem_space, problem_.w);
set_argument(result, "c", problem_space, problem_.c);
set_argument(result, "k", problem_space, problem_.k);
set_argument(result, "r", problem_space, problem_.r);
set_argument(result, "s", problem_space, problem_.s);
set_argument(result, "p", problem_space, problem_.p);
set_argument(result, "q", problem_space, problem_.q);
set_argument(result, "g", problem_space, problem_.groups);
set_argument(result, "pad_h", problem_space, problem_.pad_h);
set_argument(result, "pad_w", problem_space, problem_.pad_w);
set_argument(result, "stride_h", problem_space, problem_.stride_h);
set_argument(result, "stride_w", problem_space, problem_.stride_w);
set_argument(result, "dilation_h", problem_space, problem_.dilation_h);
set_argument(result, "dilation_w", problem_space, problem_.dilation_w);
set_argument(result, "split_k_mode", problem_space,
std::string(library::to_string(problem_.split_k_mode)));
set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices);
set_argument(result, "conv_mode", problem_space,
std::string(library::to_string(problem_.conv_mode)));
set_argument(result, "alpha", problem_space,
library::lexical_cast(problem_.alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(problem_.beta, operation_desc.element_epilogue));
set_argument(result, "eq_gemm_provider", problem_space,
std::string(library::to_string(problem_.eq_gemm_provider)));
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Bytes of activation, filter, and output tensors
int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) *
conv_workspace_.configuration.problem_size.activation_size();
int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) *
conv_workspace_.configuration.problem_size.filter_size();
int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) *
conv_workspace_.configuration.problem_size.output_size();
// Total bytes read and written, modeled on the equivalent GEMM problem
result.bytes = problem_.bytes(operation_desc);
// Theoretical flops required for the computation
result.flops = problem_.flops(operation_desc);
// Measured runtime
result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
bool Conv2dOperationProfiler::initialize_reduction_configuration_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &conv_desc =
static_cast<library::ConvDescription const &>(operation->description());
library::ConvKind const &conv_kind = conv_desc.conv_kind;
if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) {
return false;
}
if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) {
return false;
}
/// This chooses the appropriate stride element of the row-major C tensor.
int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 2 : 0);
/// initialize library::ReductionConfiguration
conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn();
conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product();
conv_workspace_.reduction_configuration.ldw =
conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.lds =
conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.ldd =
conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
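// Illustrative arithmetic (hypothetical sizes, not taken from the code above): if the
// equivalent GEMM extent mn() were 1024 x 1024 with split_k_slices = 4, the reduction would
// see partitions = 4 and partition_stride = 1024 * 1024 = 1,048,576 accumulator elements per
// partition, while ldw, lds, and ldd all reuse the leading dimension of the C tensor chosen
// by tensor_c_stride_idx above.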
// find reduction operation
library::ReductionFunctionalKey reduction_key(
library::Provider::kCUTLASS,
conv_desc.tile_description.math_instruction.element_accumulator, // element workspace
conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator
conv_desc.C.element, // element output
conv_desc.element_epilogue // element compute
);
#if 0 // debug print to check which reduction instance is selected
std::cout << reduction_key << "\n";
#endif
auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key);
if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) {
return false;
}
// initialize reduction operation required for parallel split-k conv2d operator
reduction_op_ = reduction_it->second;
// reduction operation found and initialized
return true;
}
/// Initializes workspace
Status Conv2dOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// initialize conv2d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(underlying_operation->description());
// Compute the number of copies of the problem to avoid L2 camping.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
conv_workspace_.problem_count =
1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
}
else {
conv_workspace_.problem_count = 1;
}
}
else {
conv_workspace_.problem_count = options.profiling.workspace_count;
}
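// Worked example with hypothetical numbers: with a 40 MiB L2 and a problem touching 16 MiB,
// problem_count = 1 + (3 * 40 MiB) / (16 MiB) = 1 + 7 = 8 rotating copies of the problem,
// so consecutive profiling iterations are unlikely to reuse data still resident in L2.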
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
conv_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
problem_.extent_a(operation_desc.conv_kind),
conv_workspace_.configuration.stride_a,
conv_workspace_.problem_count,
seed_shift++
);
conv_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
problem_.extent_b(operation_desc.conv_kind),
conv_workspace_.configuration.stride_b,
conv_workspace_.problem_count,
seed_shift++
);
if(problem_.groups == problem_.c && problem_.groups == problem_.k){
// The depthwise direct conv kernel needs to reorder the filter.
conv_workspace_.reordered_B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
problem_.extent_b(operation_desc.conv_kind),
conv_workspace_.configuration.stride_b,
conv_workspace_.problem_count,
seed_shift++
);
}
conv_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.configuration.stride_c,
conv_workspace_.problem_count,
seed_shift++
);
conv_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.configuration.stride_c,
conv_workspace_.problem_count
);
conv_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.configuration.stride_c,
conv_workspace_.problem_count
);
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration);
conv_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration);
conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = underlying_operation->initialize(
&conv_workspace_.configuration,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (status != Status::kSuccess) {
return status;
}
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration);
conv_workspace_.reduction_host_workspace.resize(workspace_size, 0);
status = reduction_op_->initialize(
&conv_workspace_.reduction_configuration,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kConv2d;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Conv2dOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
cudaError_t result;
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.C = conv_workspace_.C->data();
conv_workspace_.arguments.D = conv_workspace_.Computed->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
if (conv_workspace_.reordered_B != nullptr){
conv_workspace_.arguments.reordered_B = conv_workspace_.reordered_B->data();
}else{
conv_workspace_.arguments.reordered_B = nullptr;
}
conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data());
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
conv_workspace_.arguments.alpha = problem_.alpha_one.data();
conv_workspace_.arguments.beta = problem_.beta_zero.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->data();
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data();
conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
conv_workspace_.reduction_arguments.beta = problem_.beta.data();
conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
//
// Run the CUTLASS operation
//
// initialize conv2d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
#if 0
std::cout << "profiling : " << std::endl
<< "conv2d : " << operation->description().name << std::endl
<< "underlying conv2d : " << underlying_operation->description().name << std::endl
<< "reduction : " << reduction_op_->description().name << std::endl;
#endif
// run cutlass conv2d operation
results_.back().status = underlying_operation->run(
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
results_.back().status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
// Synchronize before running device reference
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUDNN
// Run verification cudnn reference
if (options.verification.provider_enabled(library::Provider::kCUDNN)) {
// Guard against unsupported cases
auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description());
Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration);
// Initialize reference data to the source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
if (status == Status::kSuccess) {
// call cudnn verification if supported
verify_with_cudnn_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else if (status == Status::kErrorInvalidProblem) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem;
}
else {
// set verification map for cudnn to not supported
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUDNN
// Run verification device reference
if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) {
// Restore reference data back to initial source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
verify_with_device_reference_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
// Run verification host reference
if (options.verification.provider_enabled(library::Provider::kReferenceHost)) {
// Restore reference data back to initial source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
verify_with_host_reference_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
/// Verifies CUTLASS against host reference
bool Conv2dOperationProfiler::verify_with_host_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
Status status;
//
// Find host reference operation using conv2d functional description key
//
library::OperationDescription const &desc = operation->description();
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
library::ConvFunctionalKey conv2d_key(
library::Provider::kReferenceHost,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
#if 0 // debug print to check which host reference instance is selected
std::cout << conv2d_key << "\n";
#endif
auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key);
if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm
library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone);
auto cc_it = operators_it->second.find(preference_key);
if(cc_it == operators_it->second.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// host reference has only one instance in Conv2dOperationVectorMap
library::Operation const *reference_op = cc_it->second[0];
//
// Copy input tensors A, B, and C from device to host buffers
//
conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes());
conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes());
conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes());
conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data());
conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data());
conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data());
//
// Initialize structure containing Conv2d arguments
//
conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data();
conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data();
conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Initialize host reference operation
//
std::vector<uint8_t> host_workspace_reference_op;
uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
host_workspace_reference_op.resize(workspace_size, 0);
reference_op->initialize(
&conv_workspace_.configuration,
host_workspace_reference_op.data());
//
// Run host reference operation
//
status = reference_op->run(
&conv_workspace_.arguments,
host_workspace_reference_op.data());
// Handle errors
if (status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified;
return true;
}
//
// Copy host reference output to device memory for equality check on device
//
conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D);
//
// Verify results
//
results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
static_cast<library::ConvDescription const &>(operation->description()),
library::Provider::kCUTLASS,
library::Provider::kReferenceHost);
}
// Return true means continue profiling
return true;
}
/// Verifies CUTLASS against device reference
bool Conv2dOperationProfiler::verify_with_device_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
Status status;
//
// Find device reference operation using conv2d functional description key
//
library::OperationDescription const &desc = operation->description();
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
library::ConvFunctionalKey conv2d_key(
library::Provider::kReferenceDevice,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key);
if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) {
results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun;
return true;
}
// conv2d device reference minimum cc is 50 and no iterator algorithm
library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone);
auto cc_it = operators_it->second.find(preference_key);
if(cc_it == operators_it->second.end()) {
results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun;
return true;
}
// device reference has only one instance in Conv2dOperationVectorMap
library::Operation const *reference_op = cc_it->second[0];
//
// Initialize device reference operation
//
std::vector<uint8_t> host_workspace_reference_op;
uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
host_workspace_reference_op.resize(workspace_size, 0);
reference_op->initialize(
&conv_workspace_.configuration,
host_workspace_reference_op.data());
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.C = conv_workspace_.C->data();
conv_workspace_.arguments.D = conv_workspace_.Reference->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run device reference operation
//
status = reference_op->run(
&conv_workspace_.arguments,
host_workspace_reference_op.data());
// Handle errors
if (status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
static_cast<library::ConvDescription const &>(operation->description()),
library::Provider::kCUTLASS,
library::Provider::kReferenceDevice);
}
// Return true means continue profiling
return true;
}
/// Measures performance results
bool Conv2dOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.C = conv_workspace_.C->data();
conv_workspace_.arguments.D = conv_workspace_.Computed->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
conv_workspace_.arguments.alpha = problem_.alpha_one.data();
conv_workspace_.arguments.beta = problem_.beta_zero.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->data();
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data();
conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
conv_workspace_.reduction_arguments.beta = problem_.beta.data();
conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data()
);
}
return true;
}
/// Method to profile a CUTLASS Operation
Status Conv2dOperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
// initialize conv2d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments);
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
// Setup rotating workspace
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % conv_workspace_.problem_count);
conv_arguments->A = conv_workspace_.A->batch_data(problem_idx);
conv_arguments->B = conv_workspace_.B->batch_data(problem_idx);
conv_arguments->C = conv_workspace_.C->batch_data(problem_idx);
conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx);
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_arguments->D = conv_workspace_.device_workspace.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
}
// Run underlying conv2d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Setup rotating workspace
int problem_idx = (iteration % conv_workspace_.problem_count);
conv_arguments->A = conv_workspace_.A->batch_data(problem_idx);
conv_arguments->B = conv_workspace_.B->batch_data(problem_idx);
conv_arguments->C = conv_workspace_.C->batch_data(problem_idx);
conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx);
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_arguments->D = conv_workspace_.device_workspace.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
}
// Run underlying conv2d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_CUDNN
/// Verifies CUTLASS against cudnn reference
bool Conv2dOperationProfiler::verify_with_cudnn_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description());
//
// Construct cudnn operators
//
CudnnCreate handle;
cudnnStatus_t status = handle.get_cudnn_create_status();
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Initialize state
//
// Initialize structure containing Conv2d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.D = conv_workspace_.Reference->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// cuDNN does not support four tensor arguments, so we copy the tensor C data into
// tensor D.
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
conv_workspace_.arguments.C = conv_workspace_.arguments.D;
try {
//
// Construct dispatcher to cudnn operator
//
detail::cudnnConvDispatcher conv_op(
conv_desc,
conv_workspace_.configuration,
conv_workspace_.arguments,
handle
);
if (conv_op.status != Status::kSuccess) {
if (conv_op.status == Status::kErrorNotSupported) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
} else {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
return true;
}
status = conv_op(handle);
// Handle errors
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
conv_desc,
library::Provider::kCUTLASS,
library::Provider::kCUDNN);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
// Return true means continue profiling
return true;
}
#endif // #if CUTLASS_ENABLE_CUDNN
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/conv2d_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/conv2d_operation_profiler.cu",
"repo_id": "tools",
"token_count": 20115
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/rank_2k_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
Rank2KOperationProfiler::Rank2KOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kRank2K,
{
{ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Rank 2k Update. D = alpha * (A*B^T + B*A^T) + beta * C (symmetric) or D = alpha * (A*B^H+B*A^H) + beta * C (hermitian)";
}
/// Destructor
Rank2KOperationProfiler::~Rank2KOperationProfiler() {
}
/// Prints usage statement for the math function
void Rank2KOperationProfiler::print_usage(std::ostream &out) const {
out << "RankK" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void Rank2KOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size Syrk kernel:\n"
<< " $ cutlass_profiler --operation=rank_2k --blas_mode=symmetric --n=1024 --k=128\n\n"
<< "Profile a particular problem size Herk kernel:\n"
<< " $ cutlass_profiler --operation=rank_2k --blas_mode=hermitian --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=rank_2k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=rank_2k --accumulator-type=f16,f32\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=rank_2k --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=rank_2k --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=rank_2k --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=rank_2k --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=rank_2k --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=rank_2k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to rank_2k kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=rank_2k \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status Rank2KOperationProfiler::RankKProblem::parse(
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->k)}).front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->n), int(this->k)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->n), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes read and written
int64_t Rank2KOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes =
2 * int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
2 * int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k +
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
}
bytes *= batch_count;
return bytes;
}
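// Worked example with hypothetical operands: for fp16 A, B, and C with n = 1024, k = 128,
// batch_count = 1, and beta == 0, the estimate is
//   2 * (2 * 1024) * 128 + 2 * (2 * 1024) * 128 + (2 * 1024) * 1025 / 2
//   = 524,288 + 524,288 + 1,049,600 = 2,098,176 bytes, roughly 2 MiB of data movement.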
/// Total number of flops computed
int64_t Rank2KOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
// FLOPs = 2 * n(n+1)k/2 [mma1] + 2 * n(n+1)k/2 [mma2] + 2 * n(n+1)/2 [epilogue]
// FLOPs = n(n+1)(2k + 1)
int64_t flops_ = n * (n + 1) * (2*k + 1);
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
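// Worked example with hypothetical sizes: for a real-valued problem with n = 1024 and k = 128,
// flops_ = 1024 * 1025 * (2 * 128 + 1) = 1024 * 1025 * 257 = 269,747,200, i.e. roughly
// 0.27 GFLOP per rank-2k update; the complex multiply-add cases above scale this by 4x and
// the Gaussian complex case by 3x.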
/// Initializes a performance result
void Rank2KOperationProfiler::RankKProblem::initialize_result(
PerformanceResult &result,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status Rank2KOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
rank_k_workspace_.configuration.lda = problem_.lda;
rank_k_workspace_.configuration.ldb = problem_.ldb;
rank_k_workspace_.configuration.ldc = problem_.ldc;
rank_k_workspace_.configuration.ldd = problem_.ldc;
//rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
rank_k_workspace_.arguments.A = nullptr;
rank_k_workspace_.arguments.B = nullptr;
rank_k_workspace_.arguments.C = nullptr;
rank_k_workspace_.arguments.D = nullptr;
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
}
/// Initializes the performance result
void Rank2KOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initializes workspace
Status Rank2KOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
rank_k_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&rank_k_workspace_.configuration,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kRank2K;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Rank2KOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Rank2KOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::RankKDescription const &rank_k_desc =
static_cast<library::RankKDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Syr2k()
//
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasRankKDispatcher rank_k_op(
rank_k_desc,
rank_k_workspace_.configuration,
rank_k_workspace_.arguments
);
if (rank_k_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = rank_k_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*rank_k_workspace_.Computed,
*rank_k_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
rank_k_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool Rank2KOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/rank_2k_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/rank_2k_operation_profiler.cu",
"repo_id": "tools",
"token_count": 8854
} | 61 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels to do avg/max pooling on a device memory tensor with NHWC layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief interface to do avg/max pooling on a device memory tensor with NHWC layout.
* \tparam T: data type
*/
template <typename T>
void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord filter_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
cutlass::MatrixCoord padding,
cutlass::MatrixCoord stride,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
int poolingType, //0 for avg pooling ; 1 for max pooling
cudaStream_t stream);
/** get the output size of pooling
*/
inline int getOutputSize(int H_W, int padding, int kernel_size, int stride)
{
return (H_W + 2 * padding - kernel_size) / stride + 1;
}
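// Example (hypothetical sizes): a 224-wide input with padding 1, kernel 3, and stride 2 gives
// getOutputSize(224, 1, 3, 2) = (224 + 2 - 3) / 2 + 1 = 112.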
/**
* input is [N, H, W, C]
* assume stride == kernel_size
 * output_h = (H + 2*padding_H - kernel_H)/stride_H + 1
 * output_w = (W + 2*padding_W - kernel_W)/stride_W + 1
 * output is [N, output_h, output_w, C]
 * grid(N, output_h, output_w)
 * block(min(C, 256)):
 * each block deals with C elements of output; each thread deals with (C + 255)/256 elements of output
*/
template<typename T, bool IS_AVG_POOLING>
__global__ void pooling_nhwc_element1_kernel(T* output,
const T* input,
const int N,
const int H,
const int W,
const int C,
const int output_H,
const int output_W,
const int kernel_H,
const int kernel_W,
const int stride_H,
const int stride_W,
const int padding_H,
const int padding_W)
{
const int tid = threadIdx.x;
const int n_idx = blockIdx.x;
const int output_h_idx = blockIdx.y;
const int output_w_idx = blockIdx.z;
int h_start_idx = output_h_idx * stride_H - padding_H;
int h_end_idx = h_start_idx + kernel_H;
h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx;
h_end_idx = h_end_idx > H ? H : h_end_idx;
int w_start_idx = output_w_idx * stride_W - padding_W;
int w_end_idx = w_start_idx + kernel_W;
w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx;
w_end_idx = w_end_idx > W ? W : w_end_idx;
input += n_idx * H * W * C;
output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C;
const int kernel_size2 = kernel_H * kernel_W;
for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) {
float pooling;
if (IS_AVG_POOLING){
pooling = 0.0f;
}
else{
pooling = -FLT_MAX;
}
for (int h = h_start_idx; h < h_end_idx; h++) {
for (int w = w_start_idx; w < w_end_idx; w++) {
const int idx = (h * W + w) * C;
const float tmp = static_cast<float>(input[idx + c_idx]);
if (IS_AVG_POOLING){
pooling = pooling + tmp;
}
else{
pooling = pooling > tmp ? pooling : tmp;
}
}
}
T output_val;
if (IS_AVG_POOLING){
output_val = T(pooling/kernel_size2);
}
else{
output_val = T(pooling);
}
output[c_idx] = output_val;
}
}
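// Launch sketch for the kernel above (this mirrors what pooling_nhwc() further below does;
// the variable names are placeholders):
//
//   dim3 grid(N, output_H, output_W);
//   dim3 block(std::min(C, 256));
//   pooling_nhwc_element1_kernel<float, /*IS_AVG_POOLING=*/true><<<grid, block, 0, stream>>>(
//       output_ptr, input_ptr, N, H, W, C, output_H, output_W,
//       kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W);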
template<typename T2, typename T, bool IS_AVG_POOLING>
__global__ void pooling_nhwc_element2_kernel(T2* output,
const T2* input,
const int N,
const int H,
const int W,
const int C,
const int output_H,
const int output_W,
const int kernel_H,
const int kernel_W,
const int stride_H,
const int stride_W,
const int padding_H,
const int padding_W)
{
const int tid = threadIdx.x;
const int n_idx = blockIdx.x;
const int output_h_idx = blockIdx.y;
const int output_w_idx = blockIdx.z;
int h_start_idx = output_h_idx * stride_H - padding_H;
int h_end_idx = h_start_idx + kernel_H;
h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx;
h_end_idx = h_end_idx > H ? H : h_end_idx;
int w_start_idx = output_w_idx * stride_W - padding_W;
int w_end_idx = w_start_idx + kernel_W;
w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx;
w_end_idx = w_end_idx > W ? W : w_end_idx;
input += n_idx * H * W * C;
output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C;
const int kernel_size2 = kernel_H * kernel_W;
for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) {
float2 pooling;
if (IS_AVG_POOLING) {
pooling = {0.0f, 0.0f};
}
else {
pooling = {-FLT_MAX, -FLT_MAX};
}
for (int h = h_start_idx; h < h_end_idx; h++) {
for (int w = w_start_idx; w < w_end_idx; w++) {
const int idx = (h * W + w) * C;
const T2 tmp = input[idx + c_idx];
const float2 tmp_flt2 = {static_cast<float>(tmp.x), static_cast<float>(tmp.y)};
if (IS_AVG_POOLING) {
pooling.x += tmp_flt2.x;
pooling.y += tmp_flt2.y;
}
else {
pooling.x = pooling.x > tmp_flt2.x ? pooling.x : tmp_flt2.x;
pooling.y = pooling.y > tmp_flt2.y ? pooling.y : tmp_flt2.y;
}
}
}
T2 output_val;
if (IS_AVG_POOLING) {
output_val.x = T(pooling.x/kernel_size2);
output_val.y = T(pooling.y/kernel_size2);
}
else {
output_val.x = T(pooling.x);
output_val.y = T(pooling.y);
}
output[c_idx] = output_val;
}
}
/**
* output [N, 1, 1, C]
* input [N, H, W, C]
* grid(C, N)
 * block(block_size) -- each thread deals with H*W/block_size elements and the block reduces them to one output value;
*/
template<typename T, bool IS_AVG_POOLING>
__global__ void pooling_nxhTo1x1_element1_kernel(
T* output, const T* input, const int N, const int HW, const int C)
{
const int c_idx = blockIdx.x;
const int n_idx = blockIdx.y;
float pooling[1];
if (IS_AVG_POOLING) {
pooling[0] = 0.0f;
}
else {
pooling[0] = -FLT_MAX;
}
const size_t input_offset = n_idx * HW * C + c_idx;
input += input_offset;
const size_t output_offset = n_idx * C + c_idx;
output += output_offset;
int tid = threadIdx.x;
for (int index = tid; index < HW; index += blockDim.x) {
float val = static_cast<float>(input[index * C]);
if (IS_AVG_POOLING) {
pooling[0] += val;
}
else {
pooling[0] = pooling[0] > val ? pooling[0] : val;
}
}
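  // Reduce the per-thread partial results across the block: a single warp (blockDim.x <= 32)
  // can be reduced with the warp-level helpers alone, larger blocks go through the block-wide
  // reduction helpers declared in device_utils.h.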
if (blockDim.x <= 32) {
if (IS_AVG_POOLING) {
warpReduceSum<float, 1>(pooling);
}
else {
warpReduceMax<float, 1>(pooling);
}
}
else {
if (IS_AVG_POOLING) {
blockReduceSum<float, 1>(pooling);
}
else {
blockReduceMax<float, 1>(pooling);
}
}
__syncthreads();
if (threadIdx.x == 0) {
T output_val;
if (IS_AVG_POOLING) {
output_val = T(pooling[0] / HW);
}
else {
output_val = T(pooling[0]);
}
output[0] = output_val;
}
}
/**
* output [N, 1, 1, C]
* input [N, H, W, C]
* grid(C/2, N)
* block(block_size) -- each thread deals with H*W/block_size * 2 elements;
*/
template<typename T2, typename T, bool IS_AVG_POOLING>
__global__ void pooling_nxhTo1x1_element2_kernel(
T2* output, const T2* input, const int N, const int HW, const int C)
{
const int c_idx = blockIdx.x;
const int n_idx = blockIdx.y;
float pooling[2];
if (IS_AVG_POOLING) {
pooling[0] = pooling[1] = 0.0f;
}
else {
pooling[0] = pooling[1] = -FLT_MAX;
}
const int C_2 = C / 2;
const size_t input_offset = n_idx * HW * C_2 + c_idx;
input += input_offset;
const size_t output_offset = n_idx * C_2 + c_idx;
output += output_offset;
int tid = threadIdx.x;
for (int index = tid; index < HW; index += blockDim.x) {
T2 val = input[index * C_2];
float2 val_flt2 = {static_cast<float>(val.x), static_cast<float>(val.y)};
if (IS_AVG_POOLING) {
pooling[0] += val_flt2.x;
pooling[1] += val_flt2.y;
}
else {
pooling[0] = pooling[0] > val_flt2.x ? pooling[0] : val_flt2.x;
pooling[1] = pooling[1] > val_flt2.y ? pooling[1] : val_flt2.y;
}
}
if (blockDim.x <= 32) {
if (IS_AVG_POOLING) {
warpReduceSum<float, 2>(pooling);
}
else {
warpReduceMax<float, 2>(pooling);
}
}
else {
if (IS_AVG_POOLING) {
blockReduceSum<float, 2>(pooling);
}
else {
blockReduceMax<float, 2>(pooling);
}
}
__syncthreads();
if (threadIdx.x == 0) {
T2 output_val;
if (IS_AVG_POOLING) {
output_val.x = T(pooling[0] / HW);
output_val.y = T(pooling[1] / HW);
}
else {
output_val.x = T(pooling[0]);
output_val.y = T(pooling[1]);
}
output[0] = output_val;
}
}
template <typename T>
void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord filter_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
cutlass::Tensor4DCoord padding,
cutlass::MatrixCoord stride,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
int poolingType, //0 for avg pooling ; 1 for max pooling
cudaStream_t stream) {
assert(input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.c() == output_tensor_size.c());
assert(filter_tensor_size.h() == stride.row() &&
filter_tensor_size.w() == stride.column());
const int N = input_tensor_size.n();
const int H = input_tensor_size.h();
const int W = input_tensor_size.w();
const int C = input_tensor_size.c();
const int padding_H = padding.h();
const int padding_W = padding.w();
const int kernel_H = filter_tensor_size.h();
const int kernel_W = filter_tensor_size.w();
const int stride_H = stride.row();
const int stride_W = stride.column();
const int output_H = getOutputSize(H, padding_H, kernel_H, stride_H);
const int output_W = getOutputSize(W, padding_W, kernel_W, stride_W);
assert(output_tensor_size.h() == output_H &&
output_tensor_size.w() == output_W);
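  // Kernel dispatch: an odd channel count C uses the scalar (element1) kernels, while an even C
  // processes channel pairs through float2/half2 (element2) kernels. When the pooling window
  // covers the entire input (kernel extent == input extent, zero padding), the specialized
  // NxHxW -> 1x1 global-pooling kernels are used instead.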
if (C % 2 != 0) {
if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) {
dim3 grid(C, N);
dim3 block(256);
if (H*W < block.x){
block.x = (H*W + 31)/32*32;
}
if (poolingType == 0) {
pooling_nxhTo1x1_element1_kernel<T, true><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H*W,
C);
} // if (poolingType == 0)
else {
pooling_nxhTo1x1_element1_kernel<T, false><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H*W,
C);
}
} // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0))
else {
dim3 grid(N, output_H, output_W);
dim3 block(256);
if (C < block.x) {
block.x = C;
}
if (poolingType == 0) {
pooling_nhwc_element1_kernel<T, true><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H,
W,
C,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (poolingType == 0)
else {
pooling_nhwc_element1_kernel<T, false><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H,
W,
C,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
}
  } // if (C % 2 != 0)
else {
if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) {
dim3 grid(C/2, N);
dim3 block(256);
if (H*W < block.x){
block.x = (H*W + 31)/32*32;
}
if (poolingType == 0) {
if (std::is_same<T, float>::value) {
pooling_nxhTo1x1_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H*W,
C);
} // if (std::is_same<T, float>::value)
else {
pooling_nxhTo1x1_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H*W,
C);
}
} // if (poolingType == 0)
else {
if (std::is_same<T, float>::value) {
pooling_nxhTo1x1_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H*W,
C);
} // if (std::is_same<T, float>::value)
else {
pooling_nxhTo1x1_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H*W,
C);
}
}
} // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0))
else {
dim3 grid(N, output_H, output_W);
dim3 block(256);
if (C/2 < block.x) {
block.x = C/2;
}
if (poolingType == 0) {
if (std::is_same<T, float>::value) {
pooling_nhwc_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (std::is_same<T, float>::value)
else {
pooling_nhwc_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
} // if (poolingType == 0)
else {
if (std::is_same<T, float>::value) {
pooling_nhwc_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (std::is_same<T, float>::value)
else {
pooling_nhwc_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
}
}
}
}
} //namespace cutlass
| tools/util/include/cutlass/util/device_nhwc_pooling.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_nhwc_pooling.h",
"repo_id": "tools",
"token_count": 9927
} | 62 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for CONV in host-side code.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cute/tensor.hpp"
#include <cuda_runtime.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::reference::host {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template<class EngineAct, class LayoutAct>
bool
is_activation_in_bounds(
cute::Tensor<EngineAct, LayoutAct> const& activation,
int32_t n_, int32_t d_, int32_t h_, int32_t w_, int32_t c_) {
return ((n_ >= 0 && n_ < size<4>(activation)) &&
(d_ >= 0 && d_ < size<3>(activation)) &&
(h_ >= 0 && h_ < size<2>(activation)) &&
(w_ >= 0 && w_ < size<1>(activation)) &&
(c_ >= 0 && c_ < size<0>(activation)));
}
template<class EngineAct, class LayoutAct>
bool
is_activation_in_bounds(
cute::Tensor<EngineAct, LayoutAct> const& activation,
int32_t n_, int32_t h_, int32_t w_, int32_t c_) {
return ((n_ >= 0 && n_ < size<3>(activation)) &&
(h_ >= 0 && h_ < size<2>(activation)) &&
(w_ >= 0 && w_ < size<1>(activation)) &&
(c_ >= 0 && c_ < size<0>(activation)));
}
template<class EngineAct, class LayoutAct>
bool
is_activation_in_bounds(
cute::Tensor<EngineAct, LayoutAct> const& activation,
int32_t n_, int32_t w_, int32_t c_) {
return ((n_ >= 0 && n_ < size<2>(activation)) &&
(w_ >= 0 && w_ < size<1>(activation)) &&
(c_ >= 0 && c_ < size<0>(activation)));
}
} // namespace detail
template<
class ElementAcc_,
class ElementScalar_,
class ElementCompute_,
class ElementC_,
class ElementOut_,
class TensorAlpha_,
class TensorBeta_,
class TensorBias_,
class ActivationFunctor_ = cutlass::epilogue::thread::Identity<ElementCompute_>
>
struct ConvEpilogueFusionParams {
using ElementAcc = ElementAcc_;
using ElementScalar = ElementScalar_;
using ElementCompute = ElementCompute_;
using ElementC = ElementC_;
using ElementOut = ElementOut_;
using TensorAlpha = TensorAlpha_;
using TensorBeta = TensorBeta_;
using TensorBias = TensorBias_;
using ActivationFunctor = ActivationFunctor_;
ElementScalar alpha = ElementScalar(1);
ElementScalar beta = ElementScalar(0);
TensorAlpha tensor_alpha{};
TensorBeta tensor_beta{};
TensorBias tensor_bias{};
};
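// For every output element, the reference below computes, in ElementCompute precision,
//
//   D = activation(alpha * acc + beta * C [+ bias])
//
// where alpha/beta are taken per output channel from tensor_alpha/tensor_beta when those
// tensors are non-empty, and from the scalar alpha/beta members otherwise.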
template<
cutlass::conv::Operator ConvOp,
int NumSpatialDims,
class TensorA,
class TensorB,
class TensorC,
class TensorD,
class ShapePadding,
class StrideTraversal,
class ShapeDilation,
class EpilogueFusionParams>
struct ConvReferenceImpl {
using ElementAcc = typename EpilogueFusionParams::ElementAcc;
using ElementC = typename EpilogueFusionParams::ElementC;
using ElementOut = typename EpilogueFusionParams::ElementOut;
using ElementScalar = typename EpilogueFusionParams::ElementScalar;
using ElementCompute = typename EpilogueFusionParams::ElementCompute;
using ElementBias = typename EpilogueFusionParams::TensorBias::value_type;
using ActivationFunctor = typename EpilogueFusionParams::ActivationFunctor;
// Input related converter
NumericConverter<ElementCompute, ElementAcc> acc_converter;
NumericConverter<ElementCompute, ElementC> residual_converter;
NumericConverter<ElementCompute, ElementBias> bias_converter;
// Scale related converter
NumericConverter<ElementCompute, ElementScalar> scale_converter;
// Output related converter
NumericConverter<ElementOut, ElementCompute> output_converter;
EpilogueFusionParams& epi_fusion_params_;
TensorA const& tensor_a_;
TensorB const& tensor_b_;
TensorC const& tensor_c_;
TensorD& tensor_d_;
ShapePadding const& padding_;
StrideTraversal const& tstride_;
ShapeDilation const& dilation_;
// Epilogue activation operation
ActivationFunctor epi_activation;
ConvReferenceImpl(
TensorA const& tensor_a,
TensorB const& tensor_b,
TensorC const& tensor_c,
TensorD& tensor_d,
ShapePadding const& padding,
StrideTraversal const& tstride,
ShapeDilation const& dilation,
EpilogueFusionParams& epi_fusion_params)
: tensor_a_(tensor_a),
tensor_b_(tensor_b),
tensor_c_(tensor_c),
tensor_d_(tensor_d),
padding_(padding),
tstride_(tstride),
dilation_(dilation),
epi_fusion_params_(epi_fusion_params) {
static_assert(rank(ShapePadding{}) == rank(ShapeDilation{}));
static_assert(rank(ShapePadding{}) == rank(StrideTraversal{}));
}
void compute_reference() {
if constexpr (ConvOp == cutlass::conv::Operator::kFprop) {
fprop_reference(cute::Int<NumSpatialDims>{});
}
else if constexpr (ConvOp == cutlass::conv::Operator::kDgrad) {
dgrad_reference(cute::Int<NumSpatialDims>{});
}
else {
wgrad_reference(cute::Int<NumSpatialDims>{});
}
}
private:
// Specialization for 1D fprop kernel
void fprop_reference(cute::Int<1> spatial_dims) {
int32_t N = size<2>(tensor_d_);
int32_t Q = size<1>(tensor_d_);
int32_t K = size<0>(tensor_d_);
int32_t S = size<1>(tensor_b_);
int32_t C = size<0>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(2)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t q = 0; q < Q; ++q) {
for (int32_t k = 0; k < K; ++k) {
auto accumulator = ElementAcc(0);
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
if (detail::is_activation_in_bounds(tensor_a_, n, w, c)) {
accumulator += ElementAcc(tensor_a_(c, w, n) * tensor_b_(c, s, k));
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(k, q, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(k, q, n) = output_converter(output);
}
}
}
}
// Specialization for 2D fprop kernel
void fprop_reference(cute::Int<2> spatial_dims) {
int32_t N = size<3>(tensor_d_);
int32_t P = size<2>(tensor_d_);
int32_t Q = size<1>(tensor_d_);
int32_t K = size<0>(tensor_d_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
int32_t C = size<0>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
for (int32_t k = 0; k < K; ++k) {
auto accumulator = ElementAcc(0);
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
if (detail::is_activation_in_bounds(tensor_a_, n, h, w, c)) {
accumulator += ElementAcc(tensor_a_(c, w, h, n) * tensor_b_(c, s, r, k));
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(k, q, p, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(k, q, p, n) = output_converter(output);
}
}
}
}
}
// Specialization for 3D fprop kernel
void fprop_reference(cute::Int<3> spatial_dims) {
int32_t N = size<4>(tensor_d_);
int32_t Z = size<3>(tensor_d_);
int32_t P = size<2>(tensor_d_);
int32_t Q = size<1>(tensor_d_);
int32_t K = size<0>(tensor_d_);
int32_t T = size<3>(tensor_b_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
int32_t C = size<0>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t z = 0; z < Z; ++z) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
for (int32_t k = 0; k < K; ++k) {
auto accumulator = ElementAcc(0);
for (int32_t t = 0; t < T; ++t) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
int32_t d = z * cute::get<2>(tstride_) - cute::get<2>(padding_) + t * cute::get<2>(dilation_);
if (detail::is_activation_in_bounds(tensor_a_, n, d, h, w, c)) {
accumulator += ElementAcc(tensor_a_(c, w, h, d, n) * tensor_b_(c, s, r, t, k));
}
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(k, q, p, z, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(k, q, p, z, n) = output_converter(output);
}
}
}
}
}
}
// Specialization for 1D dgrad kernel
void dgrad_reference(cute::Int<1> spatial_dims) {
int32_t N = size<2>(tensor_d_);
int32_t W = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
int32_t K = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(2)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t w = 0; w < W; ++w) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t k = 0; k < K; ++k) {
for (int32_t s = 0; s < S; ++s) {
int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_);
if (q % cute::get<0>(tstride_) == 0) {
q /= cute::get<0>(tstride_);
} else {
continue;
}
if (detail::is_activation_in_bounds(tensor_a_, n, q, k)) {
accumulator += ElementAcc(tensor_a_(k, q, n) * tensor_b_(c, s, k));
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data())
? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data())
? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, w, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[c]);
}
output = epi_activation(output);
tensor_d_(c, w, n) = output_converter(output);
}
}
}
}
// Specialization for 2D dgrad kernel
void dgrad_reference(cute::Int<2> spatial_dims) {
int32_t N = size<3>(tensor_d_);
int32_t H = size<2>(tensor_d_);
int32_t W = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
int32_t K = size<3>(tensor_b_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t h = 0; h < H; ++h) {
for (int32_t w = 0; w < W; ++w) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t k = 0; k < K; ++k) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_);
int32_t p = h + cute::get<1>(padding_) - r * cute::get<1>(dilation_);
if (q % cute::get<0>(tstride_) == 0) {
q /= cute::get<0>(tstride_);
} else {
continue;
}
if (p % cute::get<1>(tstride_) == 0) {
p /= cute::get<1>(tstride_);
} else {
continue;
}
if (detail::is_activation_in_bounds(tensor_a_, n, p, q, k)) {
accumulator += ElementAcc(tensor_a_(k, q, p, n) * tensor_b_(c, s, r, k));
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data())
? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data())
? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, w, h, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[c]);
}
output = epi_activation(output);
tensor_d_(c, w, h, n) = output_converter(output);
}
}
}
}
}
// Specialization for 3D dgrad kernel
void dgrad_reference(cute::Int<3> spatial_dims) {
int32_t N = size<4>(tensor_d_);
int32_t D = size<3>(tensor_d_);
int32_t H = size<2>(tensor_d_);
int32_t W = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
int32_t K = size<4>(tensor_b_);
int32_t T = size<3>(tensor_b_);
int32_t R = size<2>(tensor_b_);
int32_t S = size<1>(tensor_b_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t n = 0; n < N; ++n) {
for (int32_t d = 0; d < D; ++d) {
for (int32_t h = 0; h < H; ++h) {
for (int32_t w = 0; w < W; ++w) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t k = 0; k < K; ++k) {
for (int32_t t = 0; t < T; ++t) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
int32_t q = w + cute::get<0>(padding_) - s * cute::get<0>(dilation_);
int32_t p = h + cute::get<1>(padding_) - r * cute::get<1>(dilation_);
int32_t z = d + cute::get<2>(padding_) - t * cute::get<2>(dilation_);
if (q % cute::get<0>(tstride_) == 0) {
q /= cute::get<0>(tstride_);
} else {
continue;
}
if (p % cute::get<1>(tstride_) == 0) {
p /= cute::get<1>(tstride_);
} else {
continue;
}
if (z % cute::get<2>(tstride_) == 0) {
z /= cute::get<2>(tstride_);
} else {
continue;
}
if (detail::is_activation_in_bounds(tensor_a_, n, z, p, q, k)) {
accumulator += ElementAcc(tensor_a_(k, q, p, z, n) * tensor_b_(c, s, r, t, k));
}
}
}
}
}
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data())
? epi_fusion_params_.tensor_alpha[c] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data())
? epi_fusion_params_.tensor_beta[c] : epi_fusion_params_.beta;
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, w, h, d, n));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[c]);
}
output = epi_activation(output);
tensor_d_(c, w, h, d, n) = output_converter(output);
}
}
}
}
}
}
// Specialization for 1D wgrad kernel
void wgrad_reference(cute::Int<1> spatial_dims) {
int32_t N = size<2>(tensor_a_);
int32_t Q = size<1>(tensor_a_);
int32_t K = size<0>(tensor_a_);
int32_t S = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(2)
#endif
for (int32_t k = 0; k < K; ++k) {
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t n = 0; n < N; ++n) {
for (int32_t q = 0; q < Q; ++q) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
if (detail::is_activation_in_bounds(tensor_b_, n, w, c)) {
accumulator += ElementAcc(tensor_b_(c, w, n) * tensor_a_(k, q, n));
}
}
}
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, s, k));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(c, s, k) = output_converter(output);
}
}
}
}
// Specialization for 2D wgrad kernel
void wgrad_reference(cute::Int<2> spatial_dims) {
int32_t N = size<3>(tensor_a_);
int32_t P = size<2>(tensor_a_);
int32_t Q = size<1>(tensor_a_);
int32_t K = size<0>(tensor_a_);
int32_t R = size<2>(tensor_d_);
int32_t S = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t k = 0; k < K; ++k) {
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t n = 0; n < N; ++n) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
if (detail::is_activation_in_bounds(tensor_b_, n, h, w, c)) {
accumulator += ElementAcc(tensor_b_(c, w, h, n) * tensor_a_(k, q, p, n));
}
}
}
}
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, s, r, k));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(c, s, r, k) = output_converter(output);
}
}
}
}
}
// Specialization for 3D wgrad kernel
void wgrad_reference(cute::Int<3> spatial_dims) {
int32_t N = size<4>(tensor_a_);
int32_t Z = size<3>(tensor_a_);
int32_t P = size<2>(tensor_a_);
int32_t Q = size<1>(tensor_a_);
int32_t K = size<0>(tensor_a_);
int32_t T = size<3>(tensor_d_);
int32_t R = size<2>(tensor_d_);
int32_t S = size<1>(tensor_d_);
int32_t C = size<0>(tensor_d_);
#if defined(_OPENMP)
#pragma omp parallel for collapse(3)
#endif
for (int32_t k = 0; k < K; ++k) {
ElementScalar alpha = raw_pointer_cast(epi_fusion_params_.tensor_alpha.data()) ?
epi_fusion_params_.tensor_alpha[k] : epi_fusion_params_.alpha;
ElementScalar beta = raw_pointer_cast(epi_fusion_params_.tensor_beta.data()) ?
epi_fusion_params_.tensor_beta[k] : epi_fusion_params_.beta;
for (int32_t t = 0; t < T; ++t) {
for (int32_t r = 0; r < R; ++r) {
for (int32_t s = 0; s < S; ++s) {
for (int32_t c = 0; c < C; ++c) {
auto accumulator = ElementAcc(0);
for (int32_t n = 0; n < N; ++n) {
for (int32_t z = 0; z < Z; ++z) {
for (int32_t p = 0; p < P; ++p) {
for (int32_t q = 0; q < Q; ++q) {
int32_t w = q * cute::get<0>(tstride_) - cute::get<0>(padding_) + s * cute::get<0>(dilation_);
int32_t h = p * cute::get<1>(tstride_) - cute::get<1>(padding_) + r * cute::get<1>(dilation_);
int32_t d = z * cute::get<2>(tstride_) - cute::get<2>(padding_) + t * cute::get<2>(dilation_);
if (detail::is_activation_in_bounds(tensor_b_, n, d, h, w, c)) {
accumulator += ElementAcc(tensor_b_(c, w, h, d, n) * tensor_a_(k, q, p, z, n));
}
}
}
}
}
ElementCompute output = scale_converter(alpha) * acc_converter(accumulator) +
scale_converter(beta) * residual_converter(tensor_c_(c, s, r, t, k));
if (raw_pointer_cast(epi_fusion_params_.tensor_bias.data())) {
output += bias_converter(epi_fusion_params_.tensor_bias[k]);
}
output = epi_activation(output);
tensor_d_(c, s, r, t, k) = output_converter(output);
}
}
}
}
}
}
};
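// Usage sketch (2-D fprop). The names mA/mB/mC/mD, padding/tstride/dilation and
// tensor_alpha/tensor_beta/tensor_bias are caller-provided placeholders: cute::Tensor views
// built with the (C,W,H,N) / (C,S,R,K) mode ordering assumed above, rank-2 cute tuples for
// padding/stride/dilation, and (possibly empty) per-channel scale/bias tensors.
//
//   using Fusion = ConvEpilogueFusionParams<
//       float, float, float, float, float,
//       decltype(tensor_alpha), decltype(tensor_beta), decltype(tensor_bias)>;
//   Fusion fusion_params{};   // alpha = 1, beta = 0, no per-channel tensors by default
//   ConvReferenceImpl<cutlass::conv::Operator::kFprop, 2,
//                     decltype(mA), decltype(mB), decltype(mC), decltype(mD),
//                     decltype(padding), decltype(tstride), decltype(dilation),
//                     Fusion> reference(mA, mB, mC, mD, padding, tstride, dilation, fusion_params);
//   reference.compute_reference();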
/////////////////////////////////////////////////////////////////////////////////////////////////
} // cutlass::reference::host
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/conv.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/conv.hpp",
"repo_id": "tools",
"token_count": 13401
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Provides several functions for filling tensors with data.
*/
#pragma once
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
#include <random>
#include <stdexcept>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/blas3.h"
#include "cutlass/util/distribution.h"
#include "tensor_foreach.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element value;
//
// Methods
//
TensorFillFunc(
TensorView const &view_ = TensorView(),
Element value_ = Element(0)
): view(view_), value(value_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
view.at(coord) = value;
}
};
/// Returns a pair of values of the Gaussian distribution generated by the Box Muller method
struct BoxMullerFunc {
BoxMullerFunc() {}
void operator()(
double* rnd, ///< Size-2 vector to be filled with random values
double mean = 0, ///< Mean of the Gaussian distribution
double stddev = 1, ///< Standard deviation of the Gaussian distribution
double pi = std::acos(-1)) const {
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
rnd[0] = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd[1] = std::sqrt(-2 * std::log(u1)) * std::sin(2 * pi * u2);
rnd[0] = mean + stddev * rnd[0];
rnd[1] = mean + stddev * rnd[1];
}
};
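// The Box-Muller transform above maps two independent uniform samples u1, u2 in (0, 1] to
// two independent standard normal samples:
//   z0 = sqrt(-2 ln u1) * cos(2 pi u2),   z1 = sqrt(-2 ln u1) * sin(2 pi u2)
// which are then shifted and scaled to mean + stddev * z.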
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorView<Element, Layout> dst, ///< destination tensor
Element val = Element(0)) { ///< value to uniformly fill it with
detail::TensorFillFunc<Element, Layout> func(dst, val);
TensorForEach(
dst.extent(),
func
);
}
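// Usage sketch (assumes a cutlass::HostTensor from cutlass/util/host_tensor.h, which is not
// included by this header):
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({128, 128});
//   cutlass::reference::host::TensorFill(tensor.host_view(), 3.5f);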
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
cutlass::complex<Element> val = cutlass::complex<Element>(0)) { ///< value to uniformly fill it with
TensorFill(dst.view_real(), val.real());
TensorFill(dst.view_imag(), val.imag());
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomGaussianFunc {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
double pnz;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1,
double pnz_ = 100.0
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
// Box-Muller transform to generate random numbers with Normal distribution
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
// Compute Gaussian random value
double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd = mean + stddev * rnd;
// Scale and convert final result
Element result;
// Sample from the Bernoulli distribution, and use the result to sample from the Gaussian
std::random_device rnd_device;
std::mt19937 bernoulli_rnd(rnd_device());
std::bernoulli_distribution bernoulli_dist(pnz / 100);
bool bernoulli_result = bernoulli_dist(bernoulli_rnd);
// Sample from the Gaussian distribution for a nonzero element
if (bernoulli_result) {
if (int_scale >= 0) {
rnd = double(std::llround(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(rnd);
}
else {
result = static_cast<Element>(rnd);
}
}
else {
result = static_cast<Element>(0);
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomGaussianFunc<complex<Element> > {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
double pnz;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1,
double pnz_ = 100.0
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
double rnd[2];
detail::BoxMullerFunc func;
func(rnd, mean, stddev, pi);
// Sample from the Bernoulli distribution, and use the result to sample from the Gaussian
std::random_device rnd_device;
std::mt19937 bernoulli_rnd(rnd_device());
std::bernoulli_distribution bernoulli_dist(pnz / 100);
bool bernoulli_result = bernoulli_dist(bernoulli_rnd);
// Sample from the Gaussian distribution for a nonzero element
if (bernoulli_result) {
if (int_scale >= 0) {
rnd[0] = double(int(rnd[0] * double(1 << int_scale)));
rnd[1] = double(int(rnd[1] * double(1 << int_scale)));
reals[0] = from_real<Element>(rnd[0] / double(1 << int_scale));
reals[1] = from_real<Element>(rnd[1] / double(1 << int_scale));
}
else {
reals[0] = from_real<Element>(rnd[0]);
reals[1] = from_real<Element>(rnd[1]);
}
}
else {
reals[0] = from_real<Element>(0);
reals[1] = from_real<Element>(0);
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomGaussianFunc<Quaternion<Element> > {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
double pnz;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1,
double pnz_ = 100.0
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)), pnz(pnz_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
double rnd1[2];
double rnd2[2];
detail::BoxMullerFunc func;
func(rnd1, mean, stddev, pi);
func(rnd2, mean, stddev, pi);
// Sample from the Bernoulli distribution, and use the result to sample from the Gaussian
std::random_device rnd_device;
std::mt19937 bernoulli_rnd(rnd_device());
std::bernoulli_distribution bernoulli_dist(pnz / 100);
bool bernoulli_result = bernoulli_dist(bernoulli_rnd);
// Sample from the Gaussian distribution for a nonzero element
if (bernoulli_result) {
if (int_scale >= 0) {
rnd1[0] = double(int(rnd1[0] * double(1 << int_scale)));
rnd1[1] = double(int(rnd1[1] * double(1 << int_scale)));
rnd2[0] = double(int(rnd2[0] * double(1 << int_scale)));
rnd2[1] = double(int(rnd2[1] * double(1 << int_scale)));
reals[0] = from_real<Element>(rnd1[0] / double(1 << int_scale));
reals[1] = from_real<Element>(rnd1[1] / double(1 << int_scale));
reals[2] = from_real<Element>(rnd2[0] / double(1 << int_scale));
reals[3] = from_real<Element>(rnd2[1] / double(1 << int_scale));
}
else {
reals[0] = from_real<Element>(rnd1[0]);
reals[1] = from_real<Element>(rnd1[1]);
reals[2] = from_real<Element>(rnd2[0]);
reals[3] = from_real<Element>(rnd2[1]);
}
}
else {
reals[0] = from_real<Element>(0);
reals[1] = from_real<Element>(0);
reals[2] = from_real<Element>(0);
reals[3] = from_real<Element>(0);
}
return Quaternion<Element>(reals[0], reals[1], reals[2], reals[3]);
}
};
/// Computes a random Gaussian distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillGaussianFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomGaussianFunc<Element> func;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillGaussianFunc(
TensorView view_ = TensorView(),
RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
/// Computes a random Gaussian distribution for a rank-2 tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillSymmetricGaussianFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomGaussianFunc<Element> func;
cutlass::FillMode fill_mode;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillSymmetricGaussianFunc(
TensorView view_ = TensorView(),
RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid
):
view(view_), func(func_), fill_mode(fill_mode_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kLower &&
coord[0] >= coord[1]) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
coord[0] <= coord[1]) {
view.at(coord) = func();
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
  int bits = -1,                          ///< If non-negative, specifies number of fractional bits that
                                          ///  are not truncated to zero. Permits reducing precision of data.
  double pnz = 100.0) {                   ///< Percentage of elements sampled from the Gaussian
                                          ///  distribution; the remaining elements are set to zero.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz);
detail::TensorFillGaussianFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
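// Usage sketch (continuing the HostTensor example above; seed is any uint64_t):
//
//   cutlass::reference::host::TensorFillRandomGaussian(
//       tensor.host_view(), seed, /*mean=*/0.0, /*stddev=*/2.0, /*bits=*/-1, /*pnz=*/100.0);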
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
  int bits = -1,                          ///< If non-negative, specifies number of fractional bits that
                                          ///  are not truncated to zero. Permits reducing precision of data.
  double pnz = 100.0) {                   ///< Percentage of elements sampled from the Gaussian
                                          ///  distribution; the remaining elements are set to zero.
TensorFillRandomGaussian(dst.view_real(), seed, mean, stddev, bits, pnz);
TensorFillRandomGaussian(dst.view_imag(), ~seed, mean, stddev, bits, pnz);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSymmetricRandomGaussian(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
  int bits = -1,                          ///< If non-negative, specifies number of fractional bits that
                                          ///  are not truncated to zero. Permits reducing precision of data.
  double pnz = 100.0) {                   ///< Percentage of elements sampled from the Gaussian
                                          ///  distribution; the remaining elements are set to zero.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz);
detail::TensorFillSymmetricGaussianFunc<Element, Layout> func(
dst,
random_func,
fill_mode
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values of a Gaussian distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomGaussian(
Element *ptr, ///< destination buffer
size_t capacity, ///< number of elements
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
  int bits = -1,                          ///< If non-negative, specifies number of fractional bits that
                                          ///  are not truncated to zero. Permits reducing precision of data.
  double pnz = 100.0) {                   ///< Percentage of elements sampled from the Gaussian
                                          ///  distribution; the remaining elements are set to zero.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits, pnz);
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomUniformFunc {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (int_scale >= 0) {
rnd = double(std::llround(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(Real(rnd));
}
else {
result = static_cast<Element>(Real(rnd));
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomUniformFunc<complex<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
for (int i = 0; i < 2; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a Quaternion value.
template <typename Element>
struct RandomUniformFunc<Quaternion<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
for (int i = 0; i < 4; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return make_Quaternion(reals[0], reals[1], reals[2], reals[3]);
}
};
/// Computes a random uniform distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a uniform distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillSymmetricRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
cutlass::FillMode fill_mode;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillSymmetricRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid
):
view(view_), func(func_), fill_mode(fill_mode_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kLower &&
coord[0] >= coord[1]) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
coord[0] <= coord[1]) {
view.at(coord) = func();
}
}
};
/// Computes a random Uniform distribution and pads diagonal with zeros
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillPadDiagonalRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
cutlass::FillMode fill_mode;
int alignment;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillPadDiagonalRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid,
int alignment_ = 1
):
view(view_), func(func_), fill_mode(fill_mode_), alignment(alignment_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
(fill_mode == cutlass::FillMode::kLower) &&
(coord[0] >= coord[1]) ||
((coord[1] - coord[0]) >= alignment)) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
(coord[0] <= coord[1]) ||
((coord[0] - coord[1]) >= alignment)) {
view.at(coord) = func();
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values of a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillRandomUniformFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
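// Usage sketch (continuing the HostTensor example above):
//
//   cutlass::reference::host::TensorFillRandomUniform(
//       tensor.host_view(), seed, /*max=*/1.0, /*min=*/-1.0, /*bits=*/-1);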
/// Fills a tensor with random values of a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
TensorFillRandomUniform(dst.view_real(), seed, max, min, bits);
TensorFillRandomUniform(dst.view_imag(), ~seed, max, min, bits);
}
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Quaternion<Element>, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Quaternion<Element>> random_func(seed, max, min, bits);
detail::TensorFillRandomUniformFunc<Quaternion<Element>, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSymmetricRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillSymmetricRandomUniformFunc<Element, Layout> func(
dst,
random_func,
fill_mode
);
TensorForEach(
dst.extent(),
func
);
}
/// Fills a tensor with random values with a uniform random distribution pads zeros along diagonal
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillPadDiagonalRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
int alignment = 1
) {
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillPadDiagonalRandomUniformFunc<Element, Layout> func(
dst,
random_func,
fill_mode,
alignment
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of memory with a uniform value
template <
typename Element ///< Element type
>
void BlockFill(
Element *ptr,
size_t capacity,
Element val
) {
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = val;
}
}
/// Fills a block of memory with random values drawn from a uniform distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomUniform(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = random_func();
}
}
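// Example usage (illustrative sketch): filling a raw host buffer with uniform random
// values. The std::vector and its size are arbitrary choices for demonstration.
//
//   std::vector<float> block(4096);
//   cutlass::reference::host::BlockFillRandomUniform(
//       block.data(), block.size(), /*seed=*/2024, /*max=*/1.0, /*min=*/0.0);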
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillDiagonalFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element diag;
Element other;
//
// Methods
//
TensorFillDiagonalFunc(
TensorView const &view_ = TensorView(),
Element diag_ = Element(1),
Element other_ = Element(0)
):
view(view_), diag(diag_), other(other_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
view.at(coord) = (is_diag ? diag : other);
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor's diagonal with one value and all off-diagonal elements with another.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element diag = Element(1), ///< value to write in the diagonal
Element other = Element(0)) { ///< value to write off the diagonal
detail::TensorFillDiagonalFunc<Element, Layout> func(
dst,
diag,
other
);
TensorForEach(
dst.extent(),
func
);
}
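// Example usage (illustrative sketch): writing 2 on the diagonal and -1 everywhere else,
// e.g. to build a small test operand. Element type, layout and extent are arbitrary.
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({8, 8});
//   cutlass::reference::host::TensorFillDiagonal(tensor.host_view(), 2.0f, -1.0f);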
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to fill a tensor's diagonal with 1 and 0 everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillIdentity(
TensorView<Element, Layout> dst) { ///< destination tensor
TensorFillDiagonal(dst, Element(1), Element(0));
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element val = Element(1)) {
typename Layout::Index extent = dst.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
dst.at(coord) = val;
}
}
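// Example usage (illustrative sketch): adding a unit diagonal to a randomly initialized
// tensor without disturbing its off-diagonal contents. Sizes and seeds are arbitrary.
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({8, 8});
//   cutlass::reference::host::TensorFillRandomUniform(tensor.host_view(), /*seed=*/7, 1.0, -1.0);
//   cutlass::reference::host::TensorUpdateDiagonal(tensor.host_view(), 1.0f);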
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateOffDiagonalFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element other;
//
// Methods
//
TensorUpdateOffDiagonalFunc(
TensorView const &view_ = TensorView(),
Element other_ = Element(0)
):
view(view_), other(other_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (!is_diag) {
view.at(coord) = other;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to all elements in the tensor without modifying diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateOffDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element other = Element(1)) {
detail::TensorUpdateOffDiagonalFunc<Element, Layout> func(
dst,
other
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillLinearFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Array<Element, Layout::kRank> v;
Element s;
//
// Methods
//
TensorFillLinearFunc() { }
/// Constructs functor
TensorFillLinearFunc(
TensorView const &view_,
Array<Element, Layout::kRank> const & v_,
Element s_ = Element(0)
):
view(view_), v(v_), s(s_) { }
/// Updates the tensor
void operator()(Coord<Layout::kRank> const & coord) const {
Element sum(s);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
sum += Element(coord[i]) * v[i];
}
view.at(coord) = sum;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills each element of a tensor with the dot product of its coordinate and the vector v, plus the constant s.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillLinear(
TensorView<Element, Layout> dst, ///< destination tensor
Array<Element, Layout::kRank> const & v,
Element s = Element(0)) {
detail::TensorFillLinearFunc<Element, Layout> func(
dst,
v,
s
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with sequentially increasing values: each element receives s plus the linear index of its coordinate (leading coordinate varying fastest).
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSequential(
TensorView<Element, Layout> dst, ///< destination tensor
Element s = Element(0)) {
Array<Element, Layout::kRank> stride;
stride[0] = Element(1);
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
stride[i] = stride[i - 1] * Element(dst.extent()[i - 1]);
}
TensorFillLinear(dst, stride, s);
}
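// Example usage (illustrative sketch): for a rank-2 tensor of extent {2, 3}, the call
// below assigns each element s + coord[0] + coord[1] * extent[0], i.e. the values
// 0, 2, 4 in row 0 and 1, 3, 5 in row 1 when s == 0. Types and extent are arbitrary.
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({2, 3});
//   cutlass::reference::host::TensorFillSequential(tensor.host_view(), 0.0f);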
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values from a distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandom(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed,
Distribution dist) {
using Real = typename RealType<Element>::Type;
if (dist.kind == Distribution::Gaussian) {
TensorFillRandomGaussian(
view,
seed,
dist.gaussian.mean,
dist.gaussian.stddev,
dist.int_scale);
} else if (dist.kind == Distribution::Uniform) {
TensorFillRandomUniform(
view,
seed,
dist.uniform.max,
dist.uniform.min,
dist.int_scale);
}
}
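// Example usage (illustrative sketch): dispatching through cutlass::Distribution. The
// member names written below (kind, uniform.min / uniform.max, int_scale) mirror the
// fields read by TensorFillRandom above; the extent and seed are arbitrary.
//
//   cutlass::Distribution dist;
//   dist.kind = cutlass::Distribution::Uniform;
//   dist.uniform.min = -2.0;
//   dist.uniform.max = 2.0;
//   dist.int_scale = -1;
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({16, 16});
//   cutlass::reference::host::TensorFillRandom(tensor.host_view(), /*seed=*/2024, dist);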
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequential(
Element *ptr,
int64_t capacity,
Element v = Element(1),
Element s = Element(0)) {
int i = 0;
while (i < capacity) {
cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value <
8)>::get(ptr, i) = s;
s = Element(s + v);
++i;
}
}
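// Example usage (illustrative sketch): with the default v = 1 and s = 0, the call below
// fills the buffer with 0, 1, 2, ... The buffer size is an arbitrary choice.
//
//   std::vector<int> block(256);
//   cutlass::reference::host::BlockFillSequential(block.data(), int64_t(block.size()));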
/// Fills a block of data with sequential elements taken modulo a given value
template <
typename Element
>
void BlockFillSequentialModN(
Element *ptr,
int64_t capacity,
int64_t mod,
int64_t v = int64_t(1),
int64_t s = int64_t(0)) {
int i = 0;
while (i < capacity) {
cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value <
8)>::get(ptr, i) = Element(s);
s = int64_t(s + v) % mod;
++i;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with random values drawn from the specified distribution.
template <
typename Element
>
void BlockFillRandom(
Element *ptr,
size_t capacity,
uint64_t seed,
Distribution dist) {
if (dist.kind == Distribution::Gaussian) {
BlockFillRandomGaussian<Element>(
ptr,
capacity,
seed,
dist.gaussian.mean,
dist.gaussian.stddev,
dist.int_scale,
dist.gaussian.pnz);
}
else if (dist.kind == Distribution::Uniform) {
BlockFillRandomUniform<Element>(
ptr,
capacity,
seed,
dist.uniform.max,
dist.uniform.min,
dist.int_scale);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomSparseMetaFunc {
uint64_t seed;
int range;
int MetaSizeInBits;
//
// Methods
//
RandomSparseMetaFunc(
uint64_t seed_ = 0,
int MetaSizeInBits_ = 2
):
seed(seed_), MetaSizeInBits(MetaSizeInBits_) {
std::srand((unsigned)seed);
if (MetaSizeInBits_ == 2) {
range = 6;
}
else if (MetaSizeInBits_ == 4) {
range = 2;
}
else {
throw std::invalid_argument("Invalid MetaSizeInBits");
}
}
/// Compute random value and update RNG state
Element operator()() const {
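// Each nibble below appears to pack two 2-bit indices selecting the nonzero elements of
// a group: 0x4=(0,1), 0x8=(0,2), 0x9=(1,2), 0xc=(0,3), 0xd=(1,3), 0xe=(2,3) cover the
// six valid 2:4 patterns, while 0x4 and 0xe are the two valid 1:2 patterns.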
Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe};
Element TwoToOneMeta[2] = {0x4, 0xe};
Element * MetaArray = (MetaSizeInBits == 2) ? FourToTwoMeta : TwoToOneMeta;
Element result = 0x0;
for (int i = 0; i < cutlass::sizeof_bits<Element>::value / 4; ++i) {
int rnd = std::rand() % range;
Element meta = MetaArray[rnd];
result = (Element)(result | ((Element)(meta << (i * 4))));
}
return result;
}
};
/// Functor that fills a tensor with random sparse metadata values
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomSparseMetaFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomSparseMetaFunc<Element> func;
//
// Methods
//
/// Constructs the sparse-metadata fill functor.
TensorFillRandomSparseMetaFunc(
TensorView view_ = TensorView(),
RandomSparseMetaFunc<Element> func_ = RandomSparseMetaFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random sparse metadata (2-bit or 4-bit encodings).
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomSparseMeta(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
int MetaSizeInBits) { ///< 2 bit or 4 bit
detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits);
detail::TensorFillRandomSparseMetaFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
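// Example usage (illustrative sketch): generating 2-bit sparse metadata into a 16-bit
// element tensor. The uint16_t element type and the extent are arbitrary assumptions.
//
//   cutlass::HostTensor<uint16_t, cutlass::layout::RowMajor> tensor_e({32, 8});
//   cutlass::reference::host::TensorFillRandomSparseMeta(
//       tensor_e.host_view(), /*seed=*/2024, /*MetaSizeInBits=*/2);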
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of memory with random sparse metadata.
template <
typename Element ///< Element type
>
void BlockFillRandomSparseMeta(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
int MetaSizeInBits) { ///< 2 bit or 4 bit
detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits);
for (size_t i = 0; i < capacity; ++i) {
ptr[i] = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills an ELL block index matrix with random column indices drawn from a uniform distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomEllIdx(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
int rows, int ell_cols, int cols) { ///< dimension of the matrix
std::srand((unsigned)seed);
for (int i = 0; i < rows; ++i) {
int col_idx = std::rand() % cols;
for (int j = 0; j < ell_cols; ++j) {
dst.at({i, j}) = col_idx;
if (col_idx != -1) {
if (col_idx == (cols - 1)) {
col_idx = -1;
} else {
col_idx = std::rand() % (cols - col_idx - 1) + col_idx + 1;
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies a diagonal in from host memory without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalIn(
TensorView<Element, Layout> dst, ///< destination tensor
Element const *ptr) { ///< dense buffer of elements
typename Layout::Index extent = dst.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
dst.at(coord) = ReferenceFactory<Element>::get(ptr, i);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies the diagonal of a tensor into a dense buffer in host memory.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalOut(
Element *ptr, ///< dense buffer of elements
TensorView<Element, Layout> src) { ///< source tensor
typename Layout::Index extent = src.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
ReferenceFactory<Element>::get(ptr, i) = src.at(coord);
}
}
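// Example usage (illustrative sketch): extracting the diagonal of a square tensor into a
// dense host buffer sized by the smallest extent. Types and sizes are arbitrary.
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({8, 8});
//   std::vector<float> diag(8);
//   cutlass::reference::host::TensorCopyDiagonalOut(diag.data(), tensor.host_view());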
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/tensor_fill.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_fill.h",
"repo_id": "tools",
"token_count": 17993
} | 64 |
# Copyright (c) 2019 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# A small utility function which generates a C-header from an input file
function(FILE_TO_C_STRING FILENAME VARIABLE_NAME OUTPUT_STRING ZERO_TERMINATED)
FILE(READ "${FILENAME}" HEX_INPUT HEX)
if (${ZERO_TERMINATED})
string(APPEND HEX_INPUT "00")
endif()
string(REGEX REPLACE "(....)" "\\1\n" HEX_OUTPUT ${HEX_INPUT})
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "char(0x\\1)," HEX_OUTPUT ${HEX_OUTPUT})
set(HEX_OUTPUT "static char const ${VARIABLE_NAME}[] = {\n ${HEX_OUTPUT}\n};\n")
set(${OUTPUT_STRING} "${HEX_OUTPUT}" PARENT_SCOPE)
endfunction()
# message("Create header file for ${FILE_IN}")
# message("Create header file for ${FILE_OUT}")
file_to_c_string(${FILE_IN} ${VARIABLE_NAME} OUTPUT_STRING ZERO_TERMINATED)
set(RESULT "#pragma once\n")
string(APPEND RESULT "namespace cutlass {\n")
string(APPEND RESULT "namespace nvrtc {\n")
string(APPEND RESULT "${OUTPUT_STRING}")
string(APPEND RESULT "} // namespace nvrtc\n")
string(APPEND RESULT "} // namespace cutlass\n")
file(WRITE "${FILE_OUT}" "${RESULT}")
| bin2hex.cmake/0 | {
"file_path": "bin2hex.cmake",
"repo_id": "bin2hex.cmake",
"token_count": 866
} | 0 |
var searchData=
[
['_5f_5fnv_5fstd_5fmax',['__NV_STD_MAX',['../platform_8h.html#abd31f291635329bc15292954f1f01d38',1,'platform.h']]],
['_5f_5fnv_5fstd_5fmin',['__NV_STD_MIN',['../platform_8h.html#a39e234a3e3b0018b58df720bcb143420',1,'platform.h']]],
['_5f_5fplatform_5fcat',['__platform_cat',['../platform_8h.html#aece7fe71be5aaf8d12dc9e2372f97de4',1,'platform.h']]],
['_5f_5fplatform_5fcat_5f',['__platform_cat_',['../platform_8h.html#acd148999a5caeba8f6fd52e7e288e659',1,'platform.h']]]
];
| docs/search/defines_0.js/0 | {
"file_path": "docs/search/defines_0.js",
"repo_id": "docs",
"token_count": 261
} | 1 |
var searchData=
[
['layouttypeid',['LayoutTypeID',['../namespacecutlass_1_1library.html#aa863c416529c1fe76555be9760619a30',1,'cutlass::library']]]
];
| docs/search/enums_4.js/0 | {
"file_path": "docs/search/enums_4.js",
"repo_id": "docs",
"token_count": 63
} | 2 |
var searchData=
[
['vector_2eh',['vector.h',['../vector_8h.html',1,'']]],
['volta_5ftensor_5fop_5fpolicy_2eh',['volta_tensor_op_policy.h',['../volta__tensor__op__policy_8h.html',1,'']]]
];
| docs/search/files_12.js/0 | {
"file_path": "docs/search/files_12.js",
"repo_id": "docs",
"token_count": 90
} | 3 |
var searchData=
[
['debug_2eh',['debug.h',['../include_2cutlass_2util_2debug_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../include_2cutlass_2gemm_2device_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../include_2cutlass_2gemm_2kernel_2gemm_8h.html',1,'']]],
['gemm_2eh',['gemm.h',['../include_2cutlass_2gemm_2gemm_8h.html',1,'']]],
['gemm_5fcomplex_2eh',['gemm_complex.h',['../include_2cutlass_2gemm_2device_2gemm__complex_8h.html',1,'']]],
['inner_5fproduct_2eh',['inner_product.h',['../inner__product_8h.html',1,'']]],
['integer_5fsubbyte_2eh',['integer_subbyte.h',['../integer__subbyte_8h.html',1,'']]],
['interleaved_5fepilogue_2eh',['interleaved_epilogue.h',['../interleaved__epilogue_8h.html',1,'']]]
];
| docs/search/files_8.js/0 | {
"file_path": "docs/search/files_8.js",
"repo_id": "docs",
"token_count": 339
} | 4 |
var searchData=
[
['w',['w',['../structcutlass_1_1Tensor4DCoord.html#ae3136dc898c4ef079e73b51b1850ba7e',1,'cutlass::Tensor4DCoord::w() const '],['../structcutlass_1_1Tensor4DCoord.html#a3b391bf3ec3db6eec31eb23d5ff7fd21',1,'cutlass::Tensor4DCoord::w()']]],
['wait',['wait',['../classcutlass_1_1Semaphore.html#a176a4cbf65e47e9fcba9d93fc264b9c3',1,'cutlass::Semaphore']]]
];
| docs/search/functions_16.js/0 | {
"file_path": "docs/search/functions_16.js",
"repo_id": "docs",
"token_count": 188
} | 5 |
var searchData=
[
['predicate_20iterator_20concept',['Predicate Iterator Concept',['../group__predicate__iterator__concept.html',1,'']]],
['predicate_20tile_20adapter_20concept',['Predicate Tile Adapter Concept',['../group__predicate__tile__adapter.html',1,'']]],
['predicate_20vector_20concept',['Predicate Vector Concept',['../group__predicate__vector__concept.html',1,'']]]
];
| docs/search/groups_0.js/0 | {
"file_path": "docs/search/groups_0.js",
"repo_id": "docs",
"token_count": 121
} | 6 |
var searchData=
[
['uniform',['uniform',['../structcutlass_1_1Distribution.html#afc30b6976acb39e54f061af1bf2870db',1,'cutlass::Distribution']]],
['use_5fdp4a',['use_dp4a',['../classcutlass_1_1gemm_1_1warp_1_1MmaSimt.html#a39e22e3c7afea584e8425064fe72410b',1,'cutlass::gemm::warp::MmaSimt']]]
];
| docs/search/variables_12.js/0 | {
"file_path": "docs/search/variables_12.js",
"repo_id": "docs",
"token_count": 150
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization tool
*/
#include <map>
#include <iostream>
#include <iomanip>
#include <memory>
#include <cutlass/cutlass.h>
#include "options.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > layouts;
/////////////////////////////////////////////////////////////////////////////////////////////////
void print_usage(std::ostream &out) {
out << "03_visualize_layout <layout> [options]"
<< "\n\n"
<< " Layouts:\n";
for (auto const & layout : layouts) {
out << " " << layout.first << std::string(46 - layout.first.size(), ' ');
layout.second->print_help(out);
out << "\n";
}
out << "\n";
Options::print_usage(out);
out << "\nExamples:\n\n"
<< "$ 03_visualize_layout RowMajor --extent=16,16\n"
<< "$ 03_visualize_layout \"ColumnMajorInterleaved<4>\" --extent=32,8 "
"--output-shape=16 --vectorize=4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,64>\" "
"--extent=64,64 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,128>\" "
"--extent=128,32 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,256>\" "
"--extent=256,16 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,32>\" "
"--extent=32,64 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,64>\" "
"--extent=64,32 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,128>\" "
"--extent=128,16 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<16,32>\" "
"--extent=32,32 --vectorize=8 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<16,64>\" "
"--extent=64,16 --vectorize=8 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<32,16>\" "
"--extent=16,32 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<32,32>\" "
"--extent=32,16 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicandCongruous<32,32>\" "
"--extent=32,16 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicandCongruous<64, 16>\" "
"--extent=16,16 --vectorize=2 --output-shape=16,4\n"
<< "$ 03_visualize_layout \"VoltaTensorOpMultiplicandCrosswise<16,32>\" "
"--extent=32,64 --vectorize=4 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"VoltaTensorOpMultiplicandCongruous<16>\" "
"--extent=64,32 --vectorize=8 --output-shape=64,4\n";
out << std::endl;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point
int main(int argc, char const *arg[]) {
RegisterLayouts(layouts);
if (argc == 1 || (std::string(arg[1]) == "-h" || std::string(arg[1]) == "--help")) {
print_usage(std::cout);
return 0;
}
// parse command line, skipping layout name
cutlass::CommandLine cmd_line(argc - 1, arg + 1);
Options options(cmd_line);
if (options.help) {
print_usage(std::cout);
return 0;
}
if (!options.good) {
return -1;
}
std::string layout_name = arg[1];
auto layout_it = layouts.find(layout_name);
if (layout_it == layouts.end()) {
std::cerr << "Layout '" << layout_name << "' not supported." << std::endl;
return -1;
}
bool passed = layout_it->second->visualize(options);
if (!passed) {
return -1;
}
layout_it->second->print_csv(std::cout);
cudaFree(0); // Ensure CUDA is available.
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/03_visualize_layout/visualize_layout.cpp/0 | {
"file_path": "examples/03_visualize_layout/visualize_layout.cpp",
"repo_id": "examples",
"token_count": 2052
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined Implicit GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename B2bMma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ = Conv2dProblemSize ///! Convolutional operator on 2D or 3D problem
>
struct B2bImplicitGemmConvolution {
using B2bMma = B2bMma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp0 = typename B2bMma::OutputOp;
using EpilogueOutputOp1 = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static Operator const kConvolutionalOperator = ConvOperator;
using ElementA = typename B2bMma::IteratorA0::Element;
using LayoutA = typename B2bMma::IteratorA0::Layout;
using ElementB = typename B2bMma::IteratorB0::Element;
using LayoutB = typename B2bMma::IteratorB0::Layout;
using ElementC = typename EpilogueOutputOp1::ElementOutput;
/// Set output tensor C layout
using LayoutC = LayoutA;
using ElementAccumulator = typename EpilogueOutputOp0::ElementAccumulator;
using ElementCompute = typename EpilogueOutputOp0::ElementCompute;
/// Scale and Bias
using ElementScaleBias = typename B2bMma::IteratorAccumulatorScaleBias::Element;
using LayoutScaleBias = typename B2bMma::IteratorAccumulatorScaleBias::Layout;
using WarpMmaOperator0 = typename B2bMma::Policy0::Operator;
using WarpMmaOperator1 = typename B2bMma::Policy1::Operator;
using ArchMmaOperator = typename WarpMmaOperator0::ArchMmaOperator;
using MathOperator = typename ArchMmaOperator::Operator;
using OperatorClass = typename WarpMmaOperator0::OperatorClass;
using ArchTag = typename WarpMmaOperator0::ArchTag;
using ThreadblockShape0 = typename B2bMma::Shape0;
using ThreadblockShape1 = typename B2bMma::Shape1;
using WarpShape0 = typename WarpMmaOperator0::Shape;
using WarpShape1 = typename WarpMmaOperator1::Shape;
using InstructionShape = typename ArchMmaOperator::Shape;
static int const kStages = B2bMma::kStages;
static IteratorAlgorithm const kIteratorAlgorithm = B2bMma::IteratorA0::kIteratorAlgorithm;
/// Warp count (concept: GemmShape)
using WarpCount0 = typename B2bMma::WarpCount0;
static int const kThreadCount = 32 * WarpCount0::kCount;
using TensorRefA0 = typename B2bMma::IteratorA0::TensorRef;
using TensorRefB0 = typename B2bMma::IteratorB0::TensorRef;
using TensorRefScaleBias0 = typename B2bMma::IteratorAccumulatorScaleBias::TensorRef;
using TensorRefB1 = typename B2bMma::IteratorB1::TensorRef;
using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;
/// Check that iterator A and iterator B have the same convolution dimensionality and
// set device::B2bImplicitGemmConvolution::kConvDim
static_assert(B2bMma::IteratorA0::kConvDim == B2bMma::IteratorB0::kConvDim,
"Convolution on different dimensions is not supported");
static int const kConvDim = B2bMma::IteratorA0::kConvDim;
/// Conv dimension and problem size structure (Conv2d or Conv3d)
using ConvProblemSize = ConvProblemSize_;
/// Wgrad C stride idx for implicit gemm algorithm
// Conv2d row-major matrix C (KxRSC)
// Conv3d row-major matrix C (KxTRSC)
static int const kWgradCStrideIdx =
cutlass::platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorCStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradCStrideIdx : 0);
//
//
//
using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
LayoutC,
typename Epilogue::OutputTileIterator::Layout,
TensorRefC,
ConvOperator,
ConvProblemSize
>;
/// Argument structure
struct Arguments {
//
// Data members
//
ConvProblemSize problem_size_0;
ConvProblemSize problem_size_1;
TensorRefA0 ref_A0;
TensorRefB0 ref_B0;
TensorRefC ref_C0;
TensorRefScaleBias0 ref_Scale0;
TensorRefScaleBias0 ref_Bias0;
TensorRefB1 ref_B1;
TensorRefC ref_C1;
TensorRefC ref_D1;
typename EpilogueOutputOp0::Params output_op_0;
typename EpilogueOutputOp1::Params output_op_1;
SplitKMode split_k_mode;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size_0,
ConvProblemSize const & problem_size_1
):
problem_size_0(problem_size_0),
problem_size_1(problem_size_1) { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size_0,
ConvProblemSize const & problem_size_1,
TensorRefA0 const & ref_A0,
TensorRefB0 const & ref_B0,
TensorRefC const & ref_C0,
TensorRefScaleBias0 const & ref_Scale0,
TensorRefScaleBias0 const & ref_Bias0,
TensorRefB1 const & ref_B1,
TensorRefC const & ref_C1,
TensorRefC const & ref_D1,
typename EpilogueOutputOp0::Params const & output_op_0,
typename EpilogueOutputOp1::Params const & output_op_1,
SplitKMode const & split_k_mode = SplitKMode::kSerial
):
problem_size_0(problem_size_0),
problem_size_1(problem_size_1),
ref_A0(ref_A0),
ref_B0(ref_B0),
ref_C0(ref_C0),
ref_Scale0(ref_Scale0),
ref_Bias0(ref_Bias0),
ref_B1(ref_B1),
ref_C1(ref_C1),
ref_D1(ref_D1),
output_op_0(output_op_0),
output_op_1(output_op_1),
split_k_mode(split_k_mode)
{
}
};
/// Parameters structure
struct Params {
ConvProblemSize problem_size_0;
ConvProblemSize problem_size_1;
cutlass::gemm::GemmCoord grid_tiled_shape;
gemm::GemmCoord implicit_gemm_problem_size_0;
gemm::GemmCoord implicit_gemm_problem_size_1;
int swizzle_log_tile;
int gemm_k_iterations_0;
int gemm_k_iterations_1;
typename B2bMma::IteratorA0::Params iterator_A0;
typename B2bMma::IteratorA0::Element const *ptr_A0;
typename B2bMma::IteratorB0::Params iterator_B0;
typename B2bMma::IteratorB0::Element const *ptr_B0;
typename Epilogue::OutputTileIterator::Params iterator_C0;
typename Epilogue::OutputTileIterator::Element *ptr_C0;
typename B2bMma::IteratorAccumulatorScaleBias::Element *ptr_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::Element *ptr_Bias0;
typename B2bMma::IteratorB1::Params iterator_B1;
typename B2bMma::IteratorB1::Element const *ptr_B1;
typename Epilogue::OutputTileIterator::Params iterator_C1;
typename Epilogue::OutputTileIterator::Element *ptr_C1;
typename Epilogue::OutputTileIterator::Params iterator_D1;
typename Epilogue::OutputTileIterator::Element *ptr_D1;
typename EpilogueOutputOp0::Params output_op_0;
typename EpilogueOutputOp1::Params output_op_1;
int *semaphore;
SplitKMode split_k_mode;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), gemm_k_iterations_0(0), gemm_k_iterations_1(0) { }
///
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
int *semaphore = nullptr
):
problem_size_0(args.problem_size_0),
problem_size_1(args.problem_size_1),
implicit_gemm_problem_size_0(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_0)),
implicit_gemm_problem_size_1(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_1)),
iterator_A0(B2bMma::IteratorA0::getParams(args.problem_size_0, args.ref_A0.layout())),
ptr_A0(args.ref_A0.data()),
iterator_B0(args.problem_size_0, args.ref_B0.layout()),
ptr_B0(args.ref_B0.data()),
iterator_C0(ConvOutputIteratorParameter::layout(args.ref_C0)),
ptr_C0(args.ref_C0.data()),
ptr_Scale0(args.ref_Scale0.data()),
ptr_Bias0(args.ref_Bias0.data()),
iterator_B1(args.problem_size_1, args.ref_B1.layout()),
ptr_B1(args.ref_B1.data()),
iterator_C1(ConvOutputIteratorParameter::layout(args.ref_C1)),
ptr_C1(args.ref_C1.data()),
iterator_D1(ConvOutputIteratorParameter::layout(args.ref_D1)),
ptr_D1(args.ref_D1.data()),
output_op_0(args.output_op_0),
output_op_1(args.output_op_1),
semaphore(semaphore),
split_k_mode(args.split_k_mode)
{
gemm_k_iterations_0 = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape0::kK, args.problem_size_0);
gemm_k_iterations_1 = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape1::kK, args.problem_size_1);
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
implicit_gemm_problem_size_0,
{ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK},
args.problem_size_0.split_k_slices);
swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
union SharedStorage {
typename B2bMma::B2bMmaSharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
B2bImplicitGemmConvolution() { }
/// Executes one ImplicitGEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {
return;
}
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename B2bMma::IteratorA0 iterator_A0(
params.iterator_A0,
params.problem_size_0,
params.ptr_A0,
thread_idx,
MatrixCoord(
threadblock_tile_idx.m() * B2bMma::Shape0::kM,
threadblock_tile_idx.k() * B2bMma::Shape0::kK
)
);
typename B2bMma::IteratorB0 iterator_B0(
params.iterator_B0,
params.problem_size_0,
params.ptr_B0,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * B2bMma::Shape0::kK,
threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorB1 iterator_B1(
params.iterator_B1,
params.problem_size_1,
params.ptr_B1,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * B2bMma::Shape1::kK,
threadblock_tile_idx.n() * B2bMma::Shape1::kN
)
);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
// Construct iterators to accumulator scale/bias vector
typename B2bMma::IteratorAccumulatorScaleBias iterator_Scale0(
params.ptr_Scale0,
{1, params.problem_size_0.K},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorAccumulatorScaleBias iterator_Bias0(
params.ptr_Bias0,
{1, params.problem_size_0.K},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
//
// Main loop
//
EpilogueOutputOp0 output_op_0(params.output_op_0);
// Construct thread-scoped matrix multiply
B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename B2bMma::FragmentC0 src_accum;
typename B2bMma::FragmentC1 accumulators;
src_accum.clear();
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, iterator_B0,
iterator_Scale0, iterator_Bias0, iterator_B1, src_accum, output_op_0);
//
// Epilogue
//
EpilogueOutputOp1 output_op_1(params.output_op_1);
// Construct the semaphore.
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m();
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// Compute logical position within grid
threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// If performing a reduction via split-K, fetch the initial synchronization
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op_1.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k());
}
MatrixCoord threadblock_offset(
threadblock_tile_idx.m() * B2bMma::Shape1::kM,
threadblock_tile_idx.n() * B2bMma::Shape1::kN
);
// Tile iterator writing to destination tensor
typename Epilogue::OutputTileIterator iterator_D1(
params.iterator_D1,
params.ptr_D1,
ConvOutputIteratorParameter::extent(params.problem_size_1),
thread_idx,
threadblock_offset
);
// Tile iterator reading from source accumulator tensor
typename Epilogue::OutputTileIterator iterator_C1(
params.iterator_C1,
params.ptr_C1,
ConvOutputIteratorParameter::extent(params.problem_size_1),
thread_idx,
threadblock_offset
);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_idx.k()) {
iterator_C1 = iterator_D1;
}
semaphore.wait(threadblock_tile_idx.k());
__threadfence();
}
// Each split-k-slice writes to a unique tensor location
else if (params.split_k_mode == SplitKMode::kParallel) {
iterator_D1.add_pointer_offset(threadblock_tile_idx.k() *
cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size_1));
}
// Run efficient epilogue
epilogue(output_op_1, iterator_D1, accumulators, iterator_C1);
//
// Release the semaphore
//
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_idx.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/b2b_implicit_gemm_convolution.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/b2b_implicit_gemm_convolution.h",
"repo_id": "examples",
"token_count": 7161
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "threadblock/b2b_mma_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Shared Memory Accumulator Iterator
typename SmemAccumulatorIterator0_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaBaseSmemAccumulator :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
using Shape1 = Shape1_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
using SmemAccumulatorIterator0 = SmemAccumulatorIterator0_;
//
// Nested structs
//
/// Shared storage object needed by accumulator
template<
typename Shape_,
typename Element_,
typename Layout_,
typename Padding_
>
class AccumulatorSharedStorage {
public:
//
// Type definitions
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using Padding = Padding_;
/// Tensor reference to the accumulator
using TensorRefAccum = TensorRef<Element, Layout>;
/// Shape of the accumulator matrix in shared memory
using ShapeAccum = MatrixShape<Shape::kM + Padding::kRow,
Shape::kN + Padding::kColumn>;
public:
//
// Data members
//
/// Buffer for accumulator
AlignedBuffer<Element, ShapeAccum::kCount> accum;
public:
//
// Methods
//
/// Returns a layout object for the Accum matrix
CUTLASS_DEVICE
static Layout LayoutAccum() {
return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn});
}
/// Returns a TensorRef to the Accumulator
CUTLASS_HOST_DEVICE
TensorRefAccum accum_ref() {
return TensorRefAccum{accum.data(), LayoutAccum()};
}
};
using AccumulatorSharedStorage0 = AccumulatorSharedStorage<
Shape0, typename SmemAccumulatorIterator0::Element,
typename SmemAccumulatorIterator0::TensorLayout,
typename SmemAccumulatorIterator0::Padding>;
struct B2bMmaSharedStorage {
typename Base::B2bMmaSharedStorage b2b_mma_shared_storage;
AccumulatorSharedStorage0 accumulator_shared_storage0;
};
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaBaseSmemAccumulator(
///< Shared storage needed for internal use by threadblock-scoped GEMM
B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage.b2b_mma_shared_storage, thread_idx, warp_idx, lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h",
"repo_id": "examples",
"token_count": 1950
} | 10 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
The convolution version of 12_gemm_bias_relu. Similarly, we place the bias vector in operand C, and
the rest is the same as a normal convolution.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipeline stages to use
constexpr int NumStages = 4;
// This code section describes whether the Analytic or the Optimized iterator algorithm is selected
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // D = ReLU(alpha * accumulator + per-channel bias)
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
// Create matrix D with dimensions M x N used to store output from reference
// kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
// tensor C is treated as the bias vector. We can enable the CONV
// to project away the N, H, W dimensions by setting the stride to zero.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check whether the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
// Launch device reference to compute strictly the product A * B
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
std::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return 0;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/17_fprop_per_channel_bias/fprop_per_channel_bias.cu/0 | {
"file_path": "examples/17_fprop_per_channel_bias/fprop_per_channel_bias.cu",
"repo_id": "examples",
"token_count": 4569
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to fuse per channel scale+bias+relu of the activations
into the 3D fprop mainloop.
Compared with original 3D fprop kernel, this example has two more vectors, one for
the scale and one for the bias. The length of the vectors is the same as the
activation channel number. This kernel loads the vectors when the associated
activation channels are loaded in the mainloop. Between reading the
activations and scale/bias data from the shared memory and calling tensor core
instructions, scale+bias+relu is computed in the register file.
This example is customized for the Ampere 16816 fp16 tensor core instruction.
Changing to different data types or a different tensor core instruction requires
changes to the source code. See
include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h for more
technical details.
This example is adapted from 25_ampere_fprop_mainloop_fusion. The command
line is the same.
*/
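// A minimal host-side sketch (illustrative only, not part of the fused kernel)
// of the per-channel transform that the mainloop fusion applies to each
// activation element x in channel c before it reaches the tensor core MMA:
//   x' = relu(scale[c] * x + bias[c])
// The device kernel performs the same computation on register fragments
// between the shared-memory load and the mma instruction.
inline float fused_scale_bias_relu_reference(float x, float scale_c, float bias_c) {
  float y = scale_c * x + bias_c;
  return y > 0.0f ? y : 0.0f;
}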
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv3d_fprop_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t;    // Data type of elements in input scale and bias vectors
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNDHWC;
using LayoutInputB = cutlass::layout::TensorNDHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNDHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel; we use the default values
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv3dFpropFusionKernel = typename cutlass::conv::kernel::DefaultConv3dFpropFusion<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv3dFpropFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor5DCoord input_size;
cutlass::Tensor5DCoord filter_size;
cutlass::Coord<3> padding;
cutlass::Coord<3> conv_stride;
cutlass::Coord<3> dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32, 32),
filter_size(32, 3, 3, 3, 32),
padding(cutlass::make_Coord(1, 1, 1)),
conv_stride(cutlass::make_Coord(1, 1, 1)),
dilation(cutlass::make_Coord(1, 1, 1)),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
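    // (128 bits) / (16 bits per half_t element) = 8 elements per vectorized access.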
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding[0] != filter_size.d() / 2) ||
(padding[1] != filter_size.h() / 2) ||
(padding[2] != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor5DCoord input_size,
cutlass::Tensor5DCoord filter_size,
cutlass::Coord<3> stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding[0] = filter_size.d() / 2;
padding[1] = filter_size.h() / 2;
padding[2] = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("d", input_size.d());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("t", filter_size.d());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.d() == 3 && filter_size.h() == 3 && filter_size.w() == 3) {
padding = cutlass::make_Coord(1, 1, 1);
}
else {
filter_size.d() = 1;
filter_size.h() = 1;
filter_size.w() = 1;
padding = cutlass::make_Coord(0, 0, 0);
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "25_ampere_3d_fprop_mainloop_fusion example\n\n"
<< " This example fuses scale+bias+relu of the activations into Ampere's\n"
<< " Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NDHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --d <int> Input tensor extent D\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --t <int> Filter extent T\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=32 --d=96 --h=96 --w=96 --c=64 --k=64 --t=1 --r=1 --s=1\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=1 --d=224 --h=224 --w=224 --c=32 --k=32 --t=3 --r=3 --s=3 --ref-check\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=19 --d=94 --h=96 --w=96 --c=128 --k=128 --t=1 --r=1 --s=1\n\n";
return out;
}
/// Computes the output tensor size (NZPQK)
cutlass::Tensor5DCoord output_size() const {
return cutlass::Tensor5DCoord(
input_size.n(),
(input_size.d() + padding[0] + padding[0] - filter_size.d()) / conv_stride[0] + 1,
(input_size.h() + padding[1] + padding[1] - filter_size.h()) / conv_stride[1] + 1,
(input_size.w() + padding[2] + padding[2] - filter_size.w()) / conv_stride[2] + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NZPQK * TRSC
int64_t fmas = output_size().product() * int64_t(filter_size.d() * filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,D,H,W,C,K,T,R,S,Stride_D,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.d() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.d() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride[0] << ","
<< options.conv_stride[1] << ","
<< options.conv_stride[2] << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_a_scale({1, options.input_size.c()});
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_a_bias({1, options.input_size.c()});
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill scale vector for tensor A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_scale.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill bias vector for tensor A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_bias.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_a_scale.sync_device();
tensor_a_bias.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partition
int split_k_slices = 1;
// Construct Conv3dProblemSize with user defined output size
cutlass::conv::Conv3dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
typename ImplicitGemmFusion::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_a_scale.device_ref(),
tensor_a_bias.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemmFusion implicit_gemm_fusion_op;
size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_fusion_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute scale + bias + relu in host code
for (int n = 0; n < options.input_size.n(); ++n) {
for (int d = 0; d < options.input_size.d(); ++d) {
for (int h = 0; h < options.input_size.h(); ++h) {
for (int w = 0; w < options.input_size.w(); ++w) {
for (int c = 0; c < options.input_size.c(); ++c) {
tensor_transformed_a.at({n, d, h, w, c}) = std::max(
ElementOutput(0), ElementOutput(tensor_a.at({n, d, h, w, c}) *
tensor_a_scale.at({0, c}) +
tensor_a_bias.at({0, c})));
}
}
}
}
}
tensor_transformed_a.sync_device();
// Compute with reference implementation
cutlass::reference::device::Conv3dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_transformed_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "25_ampere_3d_fprop_mainloop_fusion"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv3dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "This test must run on SM80 or above.\n";
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 18};
struct Benchmark {
int d, h, w, c, k, t, r, s, stride_d, stride_h, stride_w;
} layers[] = {
{56, 56, 56, 64, 256, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 64, 64, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 64, 64, 3, 3, 3, 1, 1, 1},
{56, 56, 56, 256, 64, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 256, 512, 1, 1, 1, 2, 2, 2},
{56, 56, 56, 256, 128, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 128, 128, 3, 3, 3, 2, 2, 2},
{28, 28, 28, 128, 512, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 512, 128, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 128, 128, 3, 3, 3, 1, 1, 1},
{28, 28, 28, 512, 1024, 1, 1, 1, 2, 2, 2},
{28, 28, 28, 512, 256, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 256, 256, 3, 3, 3, 2, 2, 2},
{14, 14, 14, 256, 1024, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 1024, 256, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 256, 256, 3, 3, 3, 1, 1, 1},
{14, 14, 14, 1024, 2048, 1, 1, 1, 2, 2, 2},
{14, 14, 14, 1024, 512, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 512, 512, 3, 3, 3, 2, 2, 2},
{ 7, 7, 7, 512, 2048, 1, 1, 1, 1, 1, 1},
{ 7, 7, 7, 2048, 512, 1, 1, 1, 1, 1, 1},
{ 7, 7, 7, 512, 512, 3, 3, 3, 1, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.d, layer.h, layer.w, layer.c},
{layer.k, layer.t, layer.r, layer.s, layer.c},
cutlass::make_Coord(layer.stride_d, layer.stride_h, layer.stride_w));
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/25_ampere_fprop_mainloop_fusion/ampere_3d_fprop_mainloop_fusion.cu/0 | {
"file_path": "examples/25_ampere_fprop_mainloop_fusion/ampere_3d_fprop_mainloop_fusion.cu",
"repo_id": "examples",
"token_count": 10476
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/arch/mma.h"
////////////////////////////////////////////////////////////////////////////////
// Some helper functions
////////////////////////////////////////////////////////////////////////////////
#define DISPATCH_TYPES(tensor, func) \
{ \
if (query.scalar_type() == at::ScalarType::Float) { \
using scalar_t = float; \
func(); \
} else if (query.scalar_type() == at::ScalarType::Half) { \
using scalar_t = cutlass::half_t; \
func(); \
} else if (query.scalar_type() == at::ScalarType::BFloat16) { \
using scalar_t = cutlass::bfloat16_t; \
func(); \
} else { \
XFORMERS_CHECK(false, "Only fp32, half & bf16 supported at the moment"); \
} \
}
#define DISPATCH_BOOL(BOOL_V, BOOL_NAME, F) \
{ \
if (BOOL_V) { \
constexpr bool BOOL_NAME = true; \
F(); \
} else { \
constexpr bool BOOL_NAME = false; \
F(); \
} \
}
#define DISPATCH_ARCHTAG(CC, func) \
{ \
if (CC >= 80) { \
using ArchTag = cutlass::arch::Sm80; \
func(); \
} else if (CC >= 75) { \
using ArchTag = cutlass::arch::Sm75; \
func(); \
} else if (CC >= 70) { \
using ArchTag = cutlass::arch::Sm70; \
func(); \
} else if (CC >= 50) { \
using ArchTag = cutlass::arch::Sm50; \
func(); \
} else { \
XFORMERS_CHECK( \
false, \
"Your device is too old. We require compute capability >= 50"); \
} \
}
#define CHECK_NOSPARSE_CONTIGUOUS_CUDA(TENSOR) \
XFORMERS_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
XFORMERS_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
  XFORMERS_CHECK(TENSOR.is_contiguous(), #TENSOR " must be contiguous");
#define CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(TENSOR) \
XFORMERS_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
XFORMERS_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
XFORMERS_CHECK( \
TENSOR.stride(-1) == 1, #TENSOR ": last dimension must be contiguous");
#ifdef TORCH_CHECK
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
XFORMERS_CHECK( \
uint64_t(PTR) % ALIGNMENT == 0, #PTR " is not correctly aligned")
#define XFORMERS_CHECK TORCH_CHECK
#elif defined(__CUDACC_RTC__)
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \
return false; \
}
#define XFORMERS_CHECK(COND, ERR) \
if (!(COND)) { \
return false; \
}
#else
#include <iostream>
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \
std::cerr << #PTR " is not correctly aligned\n"; \
return false; \
}
#define XFORMERS_CHECK(COND, ERR) \
if (!(COND)) { \
std::cerr << "'" #COND "' failed: " << ERR << "\n"; \
return false; \
}
#endif
#define ASSIGN_CHECK_OVERFLOW(A, B) \
{ \
A = B; \
XFORMERS_CHECK( \
B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
}
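// Illustrative usage sketch (not part of the original header): the dispatch
// macros above turn runtime values into compile-time template parameters via
// nested lambdas wrapped in parentheses (so commas inside survive macro
// expansion). `launch_example_kernel` is a hypothetical entry point used only
// for illustration.
template <typename ArchTag, bool kIsAligned>
bool launch_example_kernel();

inline bool dispatch_example(int compute_capability, bool is_aligned) {
  bool launched = false;
  DISPATCH_ARCHTAG(compute_capability, ([&] {
    DISPATCH_BOOL(is_aligned, kIsAligned, ([&] {
      launched = launch_example_kernel<ArchTag, kIsAligned>();
    }));
  }));
  return launched;
}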
namespace gemm_kernel_utils {
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer align_up(integer n, integer m) {
return ((n + m - 1) / m) * m;
}
////////////////////////////////////////////////////////////////////////////////
// Determine the type of GEMM we do (TensorCores or not, Shapes ...)
// TODO: Maybe we could rely on Cutlass's DefaultGemm templates
////////////////////////////////////////////////////////////////////////////////
// Fallback to Simt (FMA on cuda cores) if not in a special case below
template <typename ArchTag, typename scalar_t_, typename Enable = void>
struct DefaultGemmType {
static constexpr int ThreadK = 8;
static constexpr int WarpK = 8;
static constexpr int kMinimumAlignment = 1;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using OpClass = cutlass::arch::OpClassSimt;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f32
template <typename ArchTag>
struct DefaultGemmType<
ArchTag,
float,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 80>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAddFastF32;
};
// Specialization for tensorcores with f16/bf16 - Sm75+
template <typename ArchTag, typename scalar_t>
struct DefaultGemmType<
ArchTag,
scalar_t,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 75 &&
cutlass::sizeof_bits<scalar_t>::value == 16>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f16 - Volta
template <>
struct DefaultGemmType<cutlass::arch::Sm70, cutlass::half_t, void> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 2;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
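// Illustrative compile-time check (not part of the original header): for a
// concrete arch/data-type pair, the traits above resolve to the expected
// specialization. Sm80 with half_t matches the Sm75+ 16-bit tensor-core case.
using ExampleSm80HalfGemmType = DefaultGemmType<cutlass::arch::Sm80, cutlass::half_t>;
static_assert(
    ExampleSm80HalfGemmType::kMinimumAlignment == 4,
    "Sm80 + f16 should take the tensor-core path (4-element minimum alignment)");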
// Enables one to write
// `auto x = kCondition ? fa(arg) : fb(arg)`
// when `fa` and `fb` have different types
template <bool kVal, typename TA, typename TB>
struct call_conditional;
template <typename TA, typename TB>
struct call_conditional<true, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(ta(arg)) {
return ta(arg);
}
};
template <typename TA, typename TB>
struct call_conditional<false, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(tb(arg)) {
return tb(arg);
}
};
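// Illustrative usage sketch (not part of the original header): select between
// two callables whose return types differ, at compile time. The functors and
// the constant below are hypothetical.
struct ExampleWiden {
  CUTLASS_HOST_DEVICE double operator()(float x) const { return double(x) * 2.0; }
};
struct ExampleKeep {
  CUTLASS_HOST_DEVICE float operator()(float x) const { return x; }
};
CUTLASS_HOST_DEVICE inline double call_conditional_example() {
  // Resolves to ExampleWiden{}(1.5f); with `false` the result type would be
  // float instead, which a plain ?: expression could not express.
  return call_conditional<true, ExampleWiden, ExampleKeep>::apply(
      ExampleWiden{}, ExampleKeep{}, 1.5f);
}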
////////////////////////////////////////////////////////////////////////////////
// Mark a variable as warp-uniform - enables some compiler optimizations
// The cheapest way to do it is just to broadcast it from lane 0
////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_DEVICE T warp_uniform(T value) {
struct {
union {
T value;
uint32_t asInt;
};
} p;
p.value = value;
p.asInt = __shfl_sync(0xffffffff, (unsigned)p.asInt, 0);
return p.value;
}
template <typename T>
CUTLASS_DEVICE T* warp_uniform(T* ptr) {
struct {
union {
T* ptr;
uint32_t asInt[2];
};
} p;
p.ptr = ptr;
p.asInt[0] = warp_uniform(p.asInt[0]);
p.asInt[1] = warp_uniform(p.asInt[1]);
return p.ptr;
}
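// Illustrative sketch (hypothetical helper, not part of the original header):
// a typical use is to re-broadcast an index or pointer that the compiler
// cannot prove is uniform across the warp, so that the address arithmetic
// built on top of it is treated as warp-uniform.
template <typename Element>
CUTLASS_DEVICE Element* example_uniform_tile_pointer(
    Element* base, int tile_idx, int elements_per_tile) {
  // Every lane adopts lane 0's tile_idx, then computes the same pointer.
  int uniform_idx = warp_uniform(tile_idx);
  return warp_uniform(base + uniform_idx * elements_per_tile);
}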
} // namespace gemm_kernel_utils
| examples/41_fused_multi_head_attention/gemm_kernel_utils.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/gemm_kernel_utils.h",
"repo_id": "examples",
"token_count": 5441
} | 13 |
This example provides utilities for generating back-to-back (B2B) GEMMs using CUTLASS.
## Quick start
A configuration file containing the GEMMs to be fused together is located in [config.json](config.json). Edit
this to change the configuration that you would like to run.
```shell
cd ir_gen
# Set up basic variables
out_dir=directory_to_emit_files
cutlass_dir=$(pwd)/../../..
config_file=$(pwd)/../config.json
# Generate code for GEMMs described in `config_file`
./generate.sh $config_file $out_dir $cutlass_dir
# Build the generated code
cd $out_dir
mkdir build && cd build
cmake .. -DGPU_ARCHS="75;80"
make -j
# Run the generated code with M=1024 K0=32 and Batch=1
./sample 1024 32 1
```
## Current restrictions
This experimental example has the following restrictions:
1. N tile should not exceed 256, or register spilling will occur.
2. Only FP16 is supported currently
3. Matrix A must be row major, matrix B must be column major, matrices C and D must be row major.
## Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| examples/44_multi_gemm_ir_and_codegen/README.md/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/README.md",
"repo_id": "examples",
"token_count": 736
} | 14 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_turing_impl:
def __init__(self,fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.class_name = gen_class_name
self.gen_class_name = gen_class_name + "_turing_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_turing_unfused = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_using(self):
code_using = "using b2b_gemm = typename cutlass::gemm::device::" + self.class_name + "<cutlass::half_t>;"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not(self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code += code_this
code += "typename b2b_gemm::Arguments arguments{\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("problem_size_", i) + ",\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", 0) + "), " + helper.var_idx("problem_size_", 0) + ".k()},\n"
for i in range(self.b2b_num):
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
ldmC = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmC + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", self.b2b_num -1) + "), " + helper.var_idx("problem_size_", self.b2b_num - 1) + ".n()},\n"
for i in range(self.b2b_num):
code += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code += "},\n"
code += " " + "Batch};\n\n"
code += " " "b2b_gemm gemm_op;\n"
code += " " + "gemm_op.initialize(arguments);\n"
return code + "\n"
def gen_run(self):
code = " " + "gemm_op(stream);\n"
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
if self.b2b_num == 1:
code_body += self.gen_turing_unfused.gen_using(False) #False -> Turing, True -> Volta
code_body += self.gen_turing_unfused.gen_initialize()
code_body += self.gen_turing_unfused.gen_run()
else:
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("turing_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_volta_turing_fuse_act_impl:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name + "_volta_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def perf_tiling(self, layer_mnk):
mnk = layer_mnk[:]
block_tile = mnk[:]
block_tile[2] = 32 # force the K tile to be 32
# M tile gen
block_tile[0] = 32
# N tile gen
if mnk[1] > 128:
block_tile[1] = 256
elif mnk[1] > 64:
block_tile[1] = 128
elif mnk[1] > 32:
block_tile[1] = 64
else :
block_tile[1] = 32
warp_tile = block_tile[:]
if block_tile[1] == 256:
warp_tile[1] = 64
elif block_tile[1] == 128:
warp_tile[1] = 32
elif block_tile[1] == 64:
warp_tile[1] = 32
else :
warp_tile[1] = 32
warp_tile[0] = 32
return block_tile, warp_tile
def process_epilogue(self, epilogue_tp, n, C_tp, Acc_tp):
epilogue_setted_type = epilogue_tp
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
        n_mod_8 = n % 8
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
epilogue_str = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<" + C_tp + ", " + str(N_align_elements) + ", " + Acc_tp + ", " + Acc_tp + ">"
return epilogue_str
def gen_using(self, volta = True):
code_using = ""
volta_arch = "cutlass::arch::Sm70"
volta_tc = "cutlass::gemm::GemmShape<8, 8, 4>"
turing_arch = "cutlass::arch::Sm75"
turing_tc = "cutlass::gemm::GemmShape<16, 8, 8>"
arch = ""
tc = ""
if volta:
arch = volta_arch
tc = volta_tc
else:
arch = turing_arch
tc = turing_tc
for i in range(self.b2b_num):
k = self.fuse_gemm_info[i]['mnk'][2]
            k_mod_8 = k % 8
ab_ldm = 1
if k_mod_8 == 0:
ab_ldm = 8
elif k_mod_8 == 4:
ab_ldm = 4
elif k_mod_8 == 2 or k_mod_8 == 6:
ab_ldm = 2
block_tile, warp_tile = self.perf_tiling(self.fuse_gemm_info[i]['mnk'])
this_gemm_config = helper.var_idx("using Gemm", i) + " = cutlass::gemm::device::GemmBatched<\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + ",\n"
this_gemm_config += " " + "cutlass::arch::OpClassTensorOp,\n"
this_gemm_config += " " + arch + ",\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(block_tile[0]) + ", " + str(block_tile[1]) + ", " + str(block_tile[2]) + ">,\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(warp_tile[0]) + ", " + str(warp_tile[1]) + ", " + str(warp_tile[2]) + ">,\n"
this_gemm_config += " " + tc + ",\n"
this_gemm_config += " " + self.process_epilogue(helper.get_epilogue_tp(self.fuse_gemm_info[i]), self.fuse_gemm_info[i]['mnk'][1], helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']), helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp'])) + ",\n"
this_gemm_config += " " + "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,\n"
this_gemm_config += " " + "2,\n"
this_gemm_config += " " + str(ab_ldm) + ",\n"
this_gemm_config += " " + str(ab_ldm) + ">;\n"
code_using += this_gemm_config + "\n"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code_this += helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = k_str
ldmB = k_str
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
            if self.fuse_gemm_info[i]['A_format'] == 'Col':
                ldmA = "M"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
                ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
            if self.fuse_gemm_info[i]['C_format'] == 'Col':
ldmC = "M"
if i == 0:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", i) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("D", i - 1) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", i) + "), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code_this += " },\n"
code_this += " " + "Batch};\n"
code_this += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_this += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(arguments_", i) + ", nullptr);\n"
code += code_this + "\n"
return code + "\n"
def gen_run(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += " " + helper.var_idx("gemm_op_", i) + "(stream);\n"
code += code_this
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("volta_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_one_API:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_volta = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_turing = gen_turing_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_CUTLASS_irrelevant_API(self):
code = ""
code += "#include <cuda_runtime.h>\n"
code += "#include <assert.h>\n"
param_name = "Fused" + str(self.b2b_num) + "xGemm_"
for i in range(self.b2b_num):
param_name += str(self.fuse_gemm_info[i]['mnk'][1]) + "_"
param_name += "Params"
params = ""
params += " " + "int M;\n"
params += " " + "int K0;\n"
params += " " + "int Batch;\n"
params += " " + "const void* A0;\n"
for i in range(self.b2b_num):
params += " " + "const void* " + helper.var_idx("B", i) + ";\n"
params += " " + "const void* " + helper.var_idx("C", i) + ";\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
params += " " + arg_tp + " " + arg_name + ";\n"
params += " " + "void* " + helper.var_idx("D", i) + ";\n"
code += ir.gen_struct(param_name, params)
code += "using Param = " + param_name + ";\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream);\n"
return code
def gen_one_api(self):
code = ""
code += "/* Auto Generated code - Do not edit.*/\n"
code += "#include \"cutlass_irrelevant.h\"\n"
code += "#include \"api.h\"\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream) {\n"
code += " " + "if (sm == 70) \n"
code += " " + " " + self.gen_class_name + "_volta_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else if(sm >= 75) \n"
code += " " + " " + self.gen_class_name + "_turing_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else assert(0);\n"
code += "}\n"
return code
def gen_code(self):
turing_code = self.gen_turing.gen_wrapper()
volta_code = self.gen_volta.gen_wrapper()
cutlass_irrelevant_code = self.gen_CUTLASS_irrelevant_API()
one_api_code = self.gen_one_api()
with open(self.output_dir + "one_api.cu", "w+") as f:
f.write(one_api_code)
helper.write_2_headfile("cutlass_irrelevant.h", self.output_dir, cutlass_irrelevant_code)
helper.write_2_headfile("api.h", self.output_dir, self.user_header_file + "\n" + turing_code + volta_code)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py",
"repo_id": "examples",
"token_count": 10964
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// B1-specific version of the policy (concept: MmaPolicy)
typename Policy1_,
/// Number of stages
int Stages,
/// Used for partial specialization
typename Enable = bool>
class DualMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
//
// Dependent types
//
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
using Operator1 = typename Policy1::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy0::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator0::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator0::ElementA, typename Operator0::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB0 = TensorRef<typename Operator0::ElementB, typename Operator0::LayoutB>;
using TensorRefB1 = TensorRef<typename Operator1::ElementB, typename Operator1::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy0::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy0::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB0 =
MatrixShape<Shape::kK * kStages + Policy0::SmemPaddingB::kRow,
Shape::kN + Policy0::SmemPaddingB::kColumn>;
using ShapeB1 =
MatrixShape<Shape::kK * kStages + Policy1::SmemPaddingB::kRow,
Shape::kN + Policy1::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator0::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator0::ElementB, ShapeB0::kCount> operand_B0;
AlignedBuffer<typename Operator1::ElementB, ShapeB1::kCount> operand_B1;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator0::LayoutA LayoutA() {
return Operator0::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator0::LayoutB LayoutB0() {
return Operator0::LayoutB::packed({ShapeB0::kRow, ShapeB0::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator1::LayoutB LayoutB1() {
return Operator1::LayoutB::packed({ShapeB1::kRow, ShapeB1::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB0 operand_B0_ref() {
return TensorRefB0{operand_B0.data(), LayoutB0()};
}
CUTLASS_HOST_DEVICE
TensorRefB1 operand_B1_ref() {
return TensorRefB1{operand_B1.data(), LayoutB1()};
}
};
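// Worked sizing example (illustrative numbers, not taken from this header): with
// Shape = GemmShape<128, 128, 32>, kStages = 2 and zero shared-memory padding,
// ShapeA spans 128 x (32 * 2) = 8192 elements, while ShapeB0 and ShapeB1 each span
// (32 * 2) x 128 = 8192 elements, so SharedStorage holds three 8192-element buffers
// (operand_A, operand_B0, operand_B1) of their respective element types.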
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator0::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator0::IteratorB warp_tile_iterator_B0_;
typename Operator1::IteratorB warp_tile_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DualMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B0_(shared_storage.operand_B0_ref(), lane_idx),
warp_tile_iterator_B1_(shared_storage.operand_B1_ref(), lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/threadblock/dual_mma_base.h/0 | {
"file_path": "examples/45_dual_gemm/threadblock/dual_mma_base.h",
"repo_id": "examples",
"token_count": 2711
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a Hopper gather+GEMM+scatter kernel fusion.
This example fuses gather before GEMM and scatter after GEMM into the same
GEMM kernel. Gather and scatter operations are controlled by an index vector
to select rows or columns from A, B, C or D matrices.
Gather/scatter operations are always performed along a strided dimension
in order to preserve vectorized loads/stores. Thus the index vector is
applied to rows of row-major matrices and columns of column-major matrices.
Note that the index vector must contain integers in range [0,X) where
X is one of (M,N,K), depending on selected gather dimension. The problem
shape given to the GEMM kernel must consist of matrix sizes AFTER gather
and BEFORE scatter operations are applied.
*/
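// A minimal sketch (illustrative only, not part of this example) of the "gather on M"
// semantics described above for a row-major A matrix: the index vector picks which rows
// of A take part in the GEMM.
//
//   // gathered_A has index_size rows of K elements; A has M rows of K elements.
//   for (int i = 0; i < index_size; ++i) {
//     for (int k = 0; k < K; ++k) {
//       gathered_A[i * K + k] = A[indices[i] * K + k];
//     }
//   }
//
// The fused kernel never materializes gathered_A; it applies this indirection while
// loading tiles of A from global memory.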
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
#include "gather_gemm.hpp"
#include "gather_kernel.cuh"
#include "scatter_epilogue.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cute;
namespace example {
// Command line options parsing
struct Options {
bool help = false;
cutlass::gemm::BatchedGemmCoord problem_size = {2048, 2048, 2048, 1};
int index_size = 1024;
int mode = 1; // N-mode gather/scatter by default
float alpha = 1.0f;
float beta = 0.0f;
bool reference_check = true;
int iterations = 20;
bool valid() const {
return problem_size.m() > 0
&& problem_size.n() > 0
&& problem_size.k() > 0
&& problem_size.batch() > 0
&& 0 <= mode && mode < 3
&& index_size <= problem_size.at(mode)
&& iterations > 0;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch_size", problem_size.batch());
cmd.get_cmd_line_argument("index_size", index_size);
char const modes[] = {'m', 'n', 'k'};
char mode_input = modes[mode];
cmd.get_cmd_line_argument("mode", mode_input);
mode = int(std::distance(std::begin(modes), std::find(std::begin(modes), std::end(modes), mode_input)));
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("check", reference_check, true);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out <<
"52_hopper_gather_scatter_fusion example\n"
"\n"
" This example uses the CUTLASS Library to fuse gather/scatter of input/output tensors with GEMM.\n"
" It validates and benchmarks the fused kernel against an unfused implementation that executes\n"
" gather+GEMM+scatter in sequence and writes intermediate (gathered) tensors to memory.\n"
" For the unfused implementation two GEMM kernels are considered: default one that uses the same\n"
" schedule and instruction set as the fused one, and an optimized one that utilizes advanced\n"
" features (such as TMA units) that cannot be used by the fused kernel due to hardware constraints."
"\n"
"Options:\n"
" --help If specified, displays this usage statement.\n"
" --m=<int> GEMM M dimension\n"
" --n=<int> GEMM N dimension\n"
" --k=<int> GEMM K dimension\n"
" --batch_size=<int> GEMM batch size\n"
" --index_size=<int> Size of N dimension gather/scatter index\n"
" --mode=<m,n,k> Gather mode (M, N, or K)\n"
" --alpha=<float> GEMM alpha parameter\n"
" --beta=<float> GEMM beta parameter\n"
" --iterations=<int> Number of profiling iterations to perform.\n"
"\n"
"Examples:\n"
"\n"
"$ ./examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion --m=1024 --n=2048 --k=1024 --mode=n --index_size=1024\n";
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class ElementA, class LayoutA, class GatherA,
class ElementB, class LayoutB, class GatherB,
class ElementC, class LayoutC, class GatherC,
class ElementD, class LayoutD, class ScatterD,
class ElementAccumulator, class ElementComputeEpilogue>
struct ExampleRunner
{
// Useful aliases
using ProblemShape = Shape<int,int,int,int>;
using StrideA = cutlass::gemm::TagToStrideA_t<LayoutA>;
using StrideB = cutlass::gemm::TagToStrideB_t<LayoutB>;
using StrideC = cutlass::gemm::TagToStrideC_t<LayoutC>;
using StrideD = cutlass::gemm::TagToStrideC_t<LayoutD>;
// Alias for the epilogue type that supports gather/scatter
using Epilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::EpilogueGatherScatter<
StrideC, StrideD,
cutlass::epilogue::thread::LinearCombination<
ElementD, 1,
ElementAccumulator, ElementComputeEpilogue,
cutlass::epilogue::thread::ScaleType::Default,
cutlass::FloatRoundStyle::round_to_nearest, ElementC
>,
cutlass::gemm::EpilogueDefault,
GatherC,
ScatterD
>
>;
// Alias for the mainloop type
using Mainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelCpAsyncWarpSpecialized
>::CollectiveOp;
using Kernel = cutlass::gemm::kernel::GemmGather<
ProblemShape,
Mainloop,
Epilogue,
void,
GatherA,
GatherB
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<Kernel>;
static constexpr bool DoGatherA = not cutlass::platform::is_same<GatherA, NoGather>::value;
static constexpr bool DoGatherB = not cutlass::platform::is_same<GatherB, NoGather>::value;
static constexpr bool DoGatherC = not cutlass::platform::is_same<GatherC, NoGather>::value;
static constexpr bool DoScatterD = not cutlass::platform::is_same<ScatterD, NoGather>::value;
static constexpr bool GatherAonM = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::RowMajor>::value;
static constexpr bool GatherAonK = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonN = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonK = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConM = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConN = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::ColumnMajor>::value;
static constexpr bool ScatterDonM = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::RowMajor>::value;
static constexpr bool ScatterDonN = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherModeM = GatherAonM || GatherConM || ScatterDonM;
static constexpr bool GatherModeN = GatherBonN || GatherConN || ScatterDonN;
static constexpr bool GatherModeK = GatherAonK || GatherBonK;
static_assert( GatherModeM && !GatherModeN && !GatherModeK ||
!GatherModeM && GatherModeN && !GatherModeK ||
!GatherModeM && !GatherModeN && GatherModeK,
"Only one gather mode (M, N or K) is supported by example runner");
// Construct a reference (non-gather) GEMM kernel type
using MainloopRef = Mainloop;
using EpilogueRef = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogue<
StrideC, StrideD,
typename Epilogue::ThreadEpilogueOp,
typename Epilogue::EpilogueSchedule
>
>;
using KernelRef = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopRef,
EpilogueRef,
void
>;
using GemmRef = cutlass::gemm::device::GemmUniversalAdapter<KernelRef>;
// Construct an optimized reference GEMM kernel type (using TMA)
using EpilogueOpt = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementComputeEpilogue,
ElementC, LayoutC, 128 / cutlass::sizeof_bits<ElementC>::value,
ElementD, LayoutD, 128 / cutlass::sizeof_bits<ElementD>::value,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using MainloopOpt = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename EpilogueOpt::SharedStorage))>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using KernelOpt = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopOpt,
EpilogueOpt,
void
>;
using GemmOpt = cutlass::gemm::device::GemmUniversalAdapter<KernelOpt>;
// Data members
cutlass::gemm::BatchedGemmCoord problem_size_orig;
cutlass::gemm::BatchedGemmCoord problem_size;
ProblemShape problem_shape_orig;
ProblemShape problem_shape;
cutlass::KernelHardwareInfo hw_info;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
StrideA stride_A_orig;
StrideB stride_B_orig;
StrideC stride_C_orig;
StrideD stride_D_orig;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
cutlass::device_memory::allocation<ElementA> tensor_a;
cutlass::device_memory::allocation<ElementB> tensor_b;
cutlass::device_memory::allocation<ElementC> tensor_c;
cutlass::device_memory::allocation<ElementD> tensor_d;
cutlass::device_memory::allocation<int> gather_indices;
cutlass::device_memory::allocation<ElementA> tensor_a_gathered;
cutlass::device_memory::allocation<ElementB> tensor_b_gathered;
cutlass::device_memory::allocation<ElementC> tensor_c_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_reference;
cutlass::gemm::GemmUniversalMode gemm_mode;
Gemm gemm;
typename Gemm::Arguments arguments;
cutlass::device_memory::allocation<uint8_t> workspace;
GemmRef gemm_ref;
typename GemmRef::Arguments arguments_ref;
cutlass::device_memory::allocation<uint8_t> workspace_ref;
GemmOpt gemm_opt;
typename GemmOpt::Arguments arguments_opt;
cutlass::device_memory::allocation<uint8_t> workspace_opt;
ExampleRunner(Options const &options, cutlass::KernelHardwareInfo const &hw_info)
: problem_size_orig(options.problem_size),
problem_size(GatherModeM ? options.index_size : problem_size_orig.m(),
GatherModeN ? options.index_size : problem_size_orig.n(),
GatherModeK ? options.index_size : problem_size_orig.k(),
problem_size_orig.batch()),
problem_shape_orig(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()),
problem_shape(problem_size.m(), problem_size.n(), problem_size.k(), problem_size.batch()),
hw_info(hw_info),
alpha(options.alpha),
beta(options.beta),
stride_A_orig(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size_orig.m(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_B_orig(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_C_orig(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_D_orig(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_A(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size.m(), problem_size.k(), problem_size.batch()))),
stride_B(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size.n(), problem_size.k(), problem_size.batch()))),
stride_C(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
stride_D(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
tensor_a(problem_size_orig.m() * problem_size_orig.k() * problem_size_orig.batch()),
tensor_b(problem_size_orig.k() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_c(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_d(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gather_indices(options.index_size),
tensor_a_gathered(problem_size.m() * problem_size.k() * problem_size_orig.batch()),
tensor_b_gathered(problem_size.k() * problem_size.n() * problem_size_orig.batch()),
tensor_c_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_reference(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gemm_mode(problem_size.batch() > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm),
gemm(),
// When constructing arguments for gather/scatter gemm, we must pass stride arguments
// made for the original (non-gathered) problem size, because they are used to access
// tensors of the original shape. However we still use the reduced (gathered) problem
// shape since it corresponds to the logical indexing in reduced size GEMM.
arguments{
gemm_mode,
problem_shape,
{
tensor_a.get(),
stride_A_orig,
tensor_b.get(),
stride_B_orig
},
{
{ alpha, beta },
tensor_c.get(), stride_C_orig,
tensor_d.get(), stride_D_orig,
typename Epilogue::GatherC {gather_indices.get()},
typename Epilogue::ScatterD{gather_indices.get()}
},
hw_info,
{},
typename Kernel::GatherA{gather_indices.get()},
typename Kernel::GatherB{gather_indices.get()}
},
workspace(Gemm::get_workspace_size(arguments)),
gemm_ref(),
arguments_ref{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_ref(GemmRef::get_workspace_size(arguments_ref)),
gemm_opt(),
arguments_opt{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_opt(GemmOpt::get_workspace_size(arguments_opt))
{
// Fill input and output matrices on device using CUTLASS helper functions
cutlass::reference::device::BlockFillRandomUniform(tensor_a.get(), tensor_a.size(), 1, ElementA(7), ElementA(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_b.get(), tensor_b.size(), 1, ElementB(7), ElementB(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_c.get(), tensor_c.size(), 1, ElementC(7), ElementC(-8), 0);
cutlass::reference::device::BlockFillSequential(tensor_d.get(), tensor_d.size(), ElementD(0), ElementD(0));
// <- Fill gather_indices with unique random integers in range [0,n)
int index_range = GatherModeM ? problem_size_orig.m() : (GatherModeN ? problem_size_orig.n() : problem_size_orig.k());
std::vector<int> indices(index_range);
std::iota(indices.begin(), indices.end(), 0);
{ // std::random_shuffle was deprecated in C++14 and removed in C++17
std::random_device make_seed;
std::mt19937 source_of_randomness(make_seed());
std::shuffle(indices.begin(), indices.end(), source_of_randomness);
}
gather_indices.copy_from_host(indices.data());
auto const gemm_init = [](auto & gemm, auto const & arguments, auto & workspace)
{
cutlass::Status status = gemm.can_implement(arguments);
CUTLASS_CHECK(status);
status = gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
};
gemm_init(gemm, arguments, workspace );
gemm_init(gemm_ref, arguments_ref, workspace_ref);
gemm_init(gemm_opt, arguments_opt, workspace_opt);
}
void debug_output(std::ostream & os)
{
auto print_tensor = [](std::ostream &os, char const * name, auto const & data, auto shape, auto stride)
{
std::vector<remove_cvref_t<decltype(*data.get())>> h_data(data.size());
data.copy_to_host(h_data.data());
Tensor t = make_tensor(h_data.data(), shape, stride);
os << "\n" << name << ": " << std::setw(4) << t << std::endl;
};
{
auto [M,N,K,L] = problem_shape_orig;
print_tensor(os, "A", tensor_a, make_shape(M,K,L), stride_A_orig);
print_tensor(os, "B", tensor_b, make_shape(N,K,L), stride_B_orig);
print_tensor(os, "C", tensor_c, make_shape(M,N,L), stride_C_orig);
print_tensor(os, "D", tensor_d, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "D reference", tensor_d_reference, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "indices", gather_indices, make_shape(gather_indices.size()), make_stride(_1{}));
}
}
template<class Gemm2>
static void run_gemm(Gemm2 &gemm)
{
cutlass::Status status = gemm.run();
CUTLASS_CHECK(status);
}
template<class Gemm2>
void run_reference(Gemm2 &gemm)
{
// Convenience wrapper around calls to separate gather/scatter kernels
auto run_gather = [this](auto call, auto const & input, auto & output, auto gather_func, auto batch_size, auto stride)
{
[[maybe_unused]] auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
call(input.get(),
output.get(),
gather_func,
batch_size,
static_cast<int>(input.size() / batch_size),
static_cast<int>(output.size() / batch_size),
static_cast<int>(get<I>(stride)),
hw_info);
};
// Forward calls via lambda to avoid specifying template arguments
auto gather_call = [](auto&&... args){ gather(static_cast<decltype(args)&&>(args)...); };
// MSVC doesn't count a use inside a discarded "if constexpr" branch, hence [[maybe_unused]].
[[maybe_unused]] auto scatter_call = [](auto&&... args){ scatter(static_cast<decltype(args)&&>(args)...); };
if constexpr (DoGatherA) {
run_gather(gather_call, tensor_a, tensor_a_gathered, arguments.gather_A, problem_size.batch(), stride_A);
}
if constexpr (DoGatherB) {
run_gather(gather_call, tensor_b, tensor_b_gathered, arguments.gather_B, problem_size.batch(), stride_B);
}
if constexpr (DoGatherC) {
if (beta != ElementComputeEpilogue(0)) {
run_gather(gather_call, tensor_c, tensor_c_gathered, arguments.epilogue.gather_C, problem_size.batch(), stride_C);
}
}
run_gemm(gemm);
if constexpr (DoScatterD) {
run_gather(scatter_call, tensor_d_gathered, tensor_d_reference, arguments.epilogue.scatter_D, problem_size.batch(), stride_D);
}
}
bool verify()
{
run_gemm(gemm);
run_reference(gemm_ref);
cudaDeviceSynchronize();
return cutlass::reference::device::BlockCompareEqual(tensor_d.get(), tensor_d_reference.get(), tensor_d.size());
}
bool run(Options const &options)
{
if (options.reference_check) {
if (!verify()) {
std::cout << "Failed validation" << std::endl;
#if 0
debug_output(std::cout);
#endif
return false;
}
else {
std::cout << "Passed validation" << std::endl;
}
}
//
// Run profiling loop
//
auto const benchmark = [&](auto name, auto func)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
func();
}
timer.stop();
double runtime = timer.elapsed_millis() / double(options.iterations);
double gflops = 2 * double(problem_size.product()) / 1e6 / runtime; // Two flops per multiply-add
std::cout << name << ":\n";
std::cout << " Runtime: " << runtime << " ms\n";
std::cout << " GFLOPs: " << gflops << "\n";
};
benchmark("Fused", [&](){ run_gemm(gemm); });
benchmark("Unfused default", [&](){ run_reference(gemm_ref); });
benchmark("Unfused optimized", [&](){ run_reference(gemm_opt); });
return true;
}
};
} // namespace example
int main(int argc, const char ** argv) {
bool notSupported = false;
// CUDA 12 minimum required
if (__CUDACC_VER_MAJOR__ < 12) {
std::cerr << "This example requires CUDA Toolkit version 12 or later.\n";
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (props.major < 9) {
std::cerr << "This example requires a device with compute capability 90 or higher.\n";
notSupported = true;
}
if (notSupported) {
return EXIT_SUCCESS; // Do not fail CI checks on unsupported systems
}
example::Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return EXIT_SUCCESS;
}
if (!options.valid()) {
std::cerr << "Invalid arguments." << "\n";
return EXIT_FAILURE;
}
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
bool result = true;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
switch (options.mode) {
using namespace example;
case 0: {
std::cout << "Gather A,C + scatter D on M mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::ColumnMajor, NoGather, // B
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 1: {
std::cout << "Gather B,C + scatter D on N mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // A
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 2: {
std::cout << "Gather A,B on K mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // C
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
}
#endif
return result ? EXIT_SUCCESS : EXIT_FAILURE;
}
| examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion.cu/0 | {
"file_path": "examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion.cu",
"repo_id": "examples",
"token_count": 10782
} | 17 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Hopper Ptr-Array Batched GEMM example using CUTLASS 3 APIs for NVIDIA Hopper architecture.
This example demonstrates an implementation of Ptr-Array Batched GEMM using a TMA + GMMA
warp-specialized cooperative kernel.
The new feature showcased in this example is on-the-fly modification of TMA descriptors
to move between batches (represented by l).
To run this example:
$ ./examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm --m=2048 --n=2048 --k=2048 --l=10
*/
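// A hedged sketch (variable names are illustrative) of the pointer-array convention that
// initialize() below sets up: one device pointer per batch, collected into a device-side
// array that the kernel dereferences when switching between batches.
//
//   std::vector<ElementA*> ptr_A_host(l);
//   for (int b = 0; b < l; ++b) {
//     ptr_A_host[b] = block_A.get() + b * m * k; // batch b starts m*k elements later
//   }
//   ptr_A.reset(l);
//   ptr_A.copy_from_host(ptr_A_host.data()); // device array of ElementA* consumed by kArray mode
//
// GemmUniversalMode::kArray then receives ptr_A.get() (an array of pointers) in place of a
// single base pointer plus batch stride.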
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "helper.h"
using namespace cute;
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes)
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_256,_128,_64>; // Threadblock-level tile size
using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperative; // Kernel to launch
using EpilogueSchedule = cutlass::epilogue::PtrArrayNoSmemWarpSpecialized; // Epilogue to launch
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementAccumulator,
ElementC, LayoutC, AlignmentC,
ElementC, LayoutC, AlignmentC,
EpilogueSchedule
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
KernelSchedule
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::ArrayProblemShape<Shape<int,int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
uint64_t seed;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_ref_D;
cutlass::DeviceAllocation<const typename Gemm::ElementA *> ptr_A;
cutlass::DeviceAllocation<const typename Gemm::ElementB *> ptr_B;
cutlass::DeviceAllocation<const typename Gemm::ElementC *> ptr_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_D;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput *> ptr_ref_D;
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help = false;
float alpha = 1.0f;
float beta = 0.0f;
int iterations = 10;
int m = 1024, n = 512, k = 1024, l = 10;
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("m", m);
cmd.get_cmd_line_argument("n", n);
cmd.get_cmd_line_argument("k", k);
cmd.get_cmd_line_argument("l", l);
cmd.get_cmd_line_argument("alpha", alpha, 1.f);
cmd.get_cmd_line_argument("beta", beta, 0.f);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "56_hopper_ptr_array_batched_gemm\n\n"
<< " Hopper FP32 GEMM using a Warp Specialized kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement\n\n"
<< " --m=<int> Sets the M extent of the GEMM\n"
<< " --n=<int> Sets the N extent of the GEMM\n"
<< " --k=<int> Sets the K extent of the GEMM\n"
<< " --l=<int> Sets the batch count for Ptr-Array GEMM\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform\n\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << "56_hopper_ptr_array_batched_gemm" << " --m=1024 --n=512 --k=1024 --l=10 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const
{
// Two flops per multiply-add
uint64_t flop = uint64_t(2) * m * n * k * l;
double gflop = double(flop) / double(1.0e9);
return gflop / runtime_s;
}
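// Worked example (using the default sizes above): flop = 2 * 1024 * 512 * 1024 * 10
// ~= 1.07e10, i.e. ~10.7 GFLOP, so an average runtime of 1 ms corresponds to roughly
// 1.07e4 GFLOP/s.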
};
/// Result structure
struct Result
{
double avg_runtime_ms = 0.0;
double gflops = 0.0;
cutlass::Status status = cutlass::Status::kSuccess;
cudaError_t error = cudaSuccess;
bool passed = false;
};
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to initialize a block of device data
template <class Element>
bool initialize_block(
cutlass::DeviceAllocation<Element>& block,
uint64_t seed=2023) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
block.get(), block.size(), seed, scope_max, scope_min, 0);
return true;
}
/// Allocates device-side data
void allocate(const Options &options) {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
for (int32_t i = 0; i < options.l; ++i) {
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = options.m * options.k;
int64_t elements_B = options.k * options.n;
int64_t elements_C = options.m * options.n;
int64_t elements_D = options.m * options.n;
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
}
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
block_ref_D.reset(total_elements_D);
}
/// Initialize operands to be used in the GEMM and reference GEMM
void initialize(const Options &options) {
stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(options.m, options.k, options.l));
stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(options.n, options.k, options.l));
stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(options.m, options.n, options.l));
stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(options.m, options.n, options.l));
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(options.l);
std::vector<ElementB *> ptr_B_host(options.l);
std::vector<ElementC *> ptr_C_host(options.l);
std::vector<ElementC *> ptr_D_host(options.l);
for (int32_t i = 0; i < options.l; ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(options.l);
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(options.l);
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(options.l);
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(options.l);
ptr_D.copy_from_host(ptr_D_host.data());
initialize_block(block_A, seed + 2023);
initialize_block(block_B, seed + 2022);
initialize_block(block_C, seed + 2021);
}
/// Populates a Gemm::Arguments structure from the given commandline options
typename Gemm::Arguments args_from_options(const Options &options)
{
cutlass::KernelHardwareInfo hw_info;
// Change device_id to another value if you are running on a machine with multiple GPUs and wish
// to use a GPU other than that with device ID 0.
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kArray,
{{options.m, options.n, options.k, options.l}},
{ptr_A.get(), stride_A, ptr_B.get(), stride_B},
{{options.alpha, options.beta}, ptr_C.get(), stride_C, ptr_D.get(), stride_D},
hw_info
};
return arguments;
}
bool verify(const Options &options) {
bool passed = true;
for (int32_t i = 0; i < options.l; ++i) {
cutlass::TensorRef ref_A(block_A.get() + offset_A.at(i), Gemm::LayoutA::packed({options.m, options.k}));
cutlass::TensorRef ref_B(block_B.get() + offset_B.at(i), Gemm::LayoutB::packed({options.k, options.n}));
cutlass::TensorRef ref_C(block_C.get() + offset_C.at(i), Gemm::LayoutC::packed({options.m, options.n}));
cutlass::TensorRef ref_D(block_ref_D.get() + offset_D.at(i), Gemm::LayoutD::packed({options.m, options.n}));
//
// Compute reference output
//
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
{options.m, options.n, options.k},
ElementAccumulator(options.alpha),
ref_A,
ref_B,
ElementAccumulator(options.beta),
ref_C,
ref_D);
// Wait for kernel to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Check if output from CUTLASS kernel and reference kernel are equal or not
passed &= cutlass::reference::device::BlockCompareEqual(block_ref_D.get() + offset_D.at(i), block_D.get() + offset_D.at(i), options.m * options.n);
}
return passed;
}
/// Execute a given example GEMM computation
template <typename Gemm>
int run(Options &options)
{
allocate(options);
initialize(options);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm
auto arguments = args_from_options(options);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check if the problem size is supported or not
CUTLASS_CHECK(gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(gemm.run());
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = verify(options);
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl;
if (!result.passed) {
exit(-1);
}
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(gemm.initialize(arguments, workspace.get()));
CUTLASS_CHECK(gemm.run());
}
timer.stop();
// Compute average setup and runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);
std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << std::endl;
std::cout << " Batches : " << options.l << std::endl;
std::cout << " Alpha, Beta : " << options.alpha << ',' << options.beta << std::endl;
std::cout << " Avg runtime : " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPS : " << result.gflops << std::endl;
}
return 0;
}
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
// CUTLASS must be compiled with CUDA 12.3 Toolkit to run this example
if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 3)) {
std::cerr << "This example requires CUDA 12.3 or newer.\n";
// Returning zero so this example passes on older Toolkits; its actions are a no-op.
return 0;
}
cudaDeviceProp props;
int current_device_id;
CUDA_CHECK(cudaGetDevice(&current_device_id));
CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (props.major < 9) {
std::cerr
<< "This example requires a GPU of NVIDIA's Hopper Architecture or "
<< "later (compute capability 90 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
//
// Evaluate CUTLASS kernels
//
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
run<Gemm>(options);
#endif
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm.cu/0 | {
"file_path": "examples/56_hopper_ptr_array_batched_gemm/56_hopper_ptr_array_batched_gemm.cu",
"repo_id": "examples",
"token_count": 7159
} | 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/alignment.hpp>
#include <cute/tensor.hpp>
#include <cute/tensor_predicate.hpp>
#include <cute/atom/copy_atom.hpp>
namespace cute
{
//
// Accept mutable temporaries
//
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy(src, dst);
}
template <class VecType,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_vec(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_vec<VecType>(src, dst);
}
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_aligned(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_aligned(src, dst);
}
template <class PrdTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(PrdTensor const& pred,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_if(pred, src, dst);
}
template <class CopyPolicy,
class PrdTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(CopyPolicy const& copy_policy,
PrdTensor const& pred,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy_if(copy_policy, pred, src, dst);
}
template <class CopyPolicy,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(CopyPolicy const& copy_policy,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return copy(copy_policy, src, dst);
}
//
// copy_if -- Predicated Copy
//
template <class PrdTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(PrdTensor const& pred,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
auto copy_op = select_elementwise_copy(src, dst);
CUTE_UNROLL
for (int i = 0; i < size(src); ++i) {
if (pred(i)) {
copy_op.copy(src(i), dst(i));
}
}
}
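// Usage sketch (assumed names and sizes, for illustration only): guard the tail of a
// partially-valid tile with a predicate tensor. g_ptr is an assumed pointer to gmem floats.
//
//   Tensor src = make_tensor(make_gmem_ptr(g_ptr), make_shape(Int<8>{})); // 8 gmem elements
//   Tensor dst = make_tensor<float>(make_shape(Int<8>{}));                // 8 register elements
//   Tensor prd = make_tensor<bool>(make_shape(Int<8>{}));                 // e.g. prd(i) = (i < residue)
//   copy_if(prd, src, dst);                                               // copy only where prd(i) is true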
//
// copy_if -- Predicated CopyAtom
//
namespace detail {
// Trait that detects if atom's traits has a member function with(bool)
template <class, class Enable = void>
constexpr bool has_with_bool = false;
template <class T>
constexpr bool has_with_bool<T, cute::void_t<decltype(declval<typename T::Traits>().with(declval<bool>()))>> = true;
} // end namespace detail
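// The trait above detects, via SFINAE on Copy_Atom::Traits, whether the atom's traits expose a
// with(bool) member. copy_if below uses it to fold the predicate into the atom itself
// (copy_atom.with(pred(i)).call(...)) instead of branching around the call.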
template <class... CopyArgs,
class PredTensor,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(Copy_Atom<CopyArgs...> const& copy_atom,
PredTensor const& pred, // (Rest...)
Tensor<SrcEngine, SrcLayout> const& src, // (V,Rest...)
Tensor<DstEngine, DstLayout> & dst) // (V,Rest...)
{
static_assert(SrcLayout::rank == DstLayout::rank, "CopyAtom rank-mismatch.");
if constexpr (SrcLayout::rank == 1) { // Dispatch the copy
copy_atom.call(src, dst);
} else { // Loop over all but the first mode
constexpr int R = SrcLayout::rank;
Tensor src_v = group_modes<1,R>(src);
Tensor dst_v = group_modes<1,R>(dst);
CUTE_UNROLL
for (int i = 0; i < size<1>(src_v); ++i) {
// If copy traits can be transformed with a predicate value, do it, otherwise branch here
if constexpr (detail::has_with_bool<Copy_Atom<CopyArgs...>>) {
copy_atom.with(pred(i)).call(src_v(_,i), dst_v(_,i));
} else {
if (pred(i)) {
copy_atom.call(src_v(_,i), dst_v(_,i));
}
}
}
}
}
//
// copy_vec -- attempt vectorized copy with VecType
//
template <class VecType,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_vec(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
static_assert(sizeof_bits_v<VecType> >= 8 && sizeof_bits_v<VecType> % 8 == 0,
"Expected a vectorization type of at least a byte.");
using SrcType = typename SrcEngine::element_type;
using DstType = typename DstEngine::element_type;
if constexpr (sizeof_bits_v<SrcType> == sizeof_bits_v<DstType> &&
sizeof_bits_v<VecType> > sizeof_bits_v<DstType>)
{
// Preserve volatility of Src/Dst types.
using SrcVecType = conditional_t<is_volatile_v<SrcType>, VecType const volatile, VecType const>;
using DstVecType = conditional_t<is_volatile_v<DstType>, VecType volatile, VecType >;
Tensor src_v = recast<SrcVecType>(src);
Tensor dst_v = recast<DstVecType>(dst);
#if 0
if (thread0()) {
print("copy_vec<%db> -- vectorizing copy:\n", int(sizeof_bits_v<VecType>));
print(" "); print(src); print(" => "); print(src_v); print("\n");
print(" "); print(dst); print(" => "); print(dst_v); print("\n");
}
#endif
return copy_if(TrivialPredTensor{}, src_v, dst_v);
} else {
#if 0
if (thread0()) {
print("copy_vec<%db> -- NOT vectorizing copy:\n", int(sizeof_bits_v<VecType>));
print(" "); print(src); print("\n");
print(" "); print(dst); print("\n");
}
#endif
return copy_if(TrivialPredTensor{}, src, dst);
}
}
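// Worked example (illustrative): copying 8 contiguous half_t elements (8 x 16 bits = 128 bits)
// with VecType = uint128_t recasts both tensors so that a single 128-bit load/store moves the
// whole tile; if the source and destination element widths differed, the code above would fall
// back to the element-wise path instead.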
//
// copy -- CopyAtom
//
template <class... CopyArgs,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<CopyArgs...> const& copy_atom,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy_if(copy_atom, TrivialPredTensor{}, src, dst);
}
//////////////////////////////////////////
// Special Auto-Vectorizing Overloads
//////////////////////////////////////////
// Specialization for AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>
template <int MaxVecBits, class... Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits> const&,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
constexpr int vec_elem = decltype(max_common_vector(src, dst))::value;
constexpr int src_bits = sizeof_bits<typename SrcEngine::value_type>::value;
// When layouts are static, accept vec_bits up to 128
// When layouts are dynamic, accept vec_bits up to MaxVecBits
constexpr int vec_bits = (is_static<SrcLayout>::value && is_static<DstLayout>::value) ?
cute::min(vec_elem * src_bits, 128) :
cute::min(vec_elem * src_bits, MaxVecBits);
#if 0
if (thread0()) {
print("copy -- found max_common_vector of %d elems and vectorization to %d bits\n", vec_elem, vec_bits);
print(" "); print(src); print("\n");
print(" "); print(dst); print("\n");
}
#endif
if constexpr (vec_elem > 1 && vec_bits >= 8) {
return copy_vec<uint_bit_t<vec_bits>>(src, dst);
} else {
return copy_if(TrivialPredTensor{}, src, dst);
}
}
// Auto-vectorizing copy for static layouts
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopy{}, src, dst);
}
// Auto-vectorizing copy with assumed alignment of dynamic layout strides up to 128bit.
template <class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_aligned(Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopyWithAssumedAlignment<128>{}, src, dst);
}
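// A hedged usage sketch (illustrative only): copy_aligned assumes dynamic strides are 128-bit
// aligned, so it can vectorize even when SrcLayout/DstLayout are not fully static.
//
//   copy_aligned(tAgA, tAsA);   // same as copy(AutoVectorizingCopyWithAssumedAlignment<128>{}, tAgA, tAsA)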
// Specialization for Atom AutoVectorizingCopy
template <class... Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<AutoVectorizingCopy, Args...> const&,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopy{}, src, dst);
}
// Specialization for Atom AutoVectorizingCopyWithAssumedAlignment
template <int MaxVecBits, class... Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, Args...> const&,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>{}, src, dst);
}
#if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
template <class... CT_Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const& atom, // Copy_Traits may or may not have the memory barrier in it already
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
using SrcType = typename SrcEngine::value_type;
using DstType = typename DstEngine::value_type;
static_assert(sizeof_bits<SrcType>::value == sizeof_bits<DstType>::value);
static_assert((is_gmem<SrcEngine>::value && is_smem<DstEngine>::value) ||
(is_smem<SrcEngine>::value && is_gmem<DstEngine>::value),
"Bulk Copy only supports gmem -> smem or smem -> gmem movement.");
// G2S or S2G dispatch
using BULK_COPY_OP = conditional_t<is_gmem<SrcEngine>::value,
SM90_BULK_COPY_G2S,
SM90_BULK_COPY_S2G>;
// Find the common subtensor of src and dst
auto tiler = max_common_layout(src, dst);
constexpr int vec_elem = decltype(size(tiler))::value;
constexpr int vec_bits = vec_elem * sizeof_bits_v<SrcType>;
static_assert(vec_bits >= 128, "Expected at least 128-bits for BLKCP");
// Construct a new concrete Atom of the vector size
using BulkAtom = Copy_Atom<Copy_Traits<BULK_COPY_OP, Int<vec_bits>, CT_Args...>, SrcType>;
auto bulk_atom = apply(atom.opargs_, [](auto const&... args) { return BulkAtom{args...}; });
#if 0
if (thread0()) {
print("copy blkcp -- found a max_common_layout of "); print(tiler); print("\n");
print(" "); print(src); print("\n");
print(" "); print(dst); print("\n");
}
#endif
return copy(bulk_atom, logical_divide(src, tiler), logical_divide(dst, tiler));
}
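// A hedged note (illustrative only): with the mbarrier bound via .with(mbar), a gmem -> smem copy
// of a contiguous 64x8 tile of half_t resolves BULK_COPY_OP to SM90_BULK_COPY_G2S with a common
// vector of 64*8*16 = 8192 bits, comfortably above the 128-bit minimum asserted above.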
// Backwards-compat. Throw out any extra Copy_Atom args.
template <class... CT_Args, class... CA_Args,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...>, CA_Args...> const& atom,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
return copy(static_cast<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const&>(atom), src, dst);
}
#endif // #if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
} // end namespace cute
| include/cute/algorithm/copy.hpp/0 | {
"file_path": "include/cute/algorithm/copy.hpp",
"repo_id": "include",
"token_count": 5672
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cuda.h>
#endif
#include <cute/atom/copy_traits_sm90_tma_swizzle.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/atom/copy_atom.hpp>
#include <cute/algorithm/prefetch.hpp>
#include <cute/numeric/integral_ratio.hpp>
namespace cute
{
template <class GmemTmaBasisStrides_, class TmaGmemBasis_, class TmaSwizzle_>
struct AuxTmaParams {
using GmemStrides = GmemTmaBasisStrides_; // Strides for Gmem mode -> Tma coord mode, may be dynamic
GmemStrides g_stride_;
using TmaGmemBasis = TmaGmemBasis_; // Layout for Tma box shape -> Gmem mode(s), always static
static_assert(is_static<TmaGmemBasis>::value);
using TmaSwizzle = TmaSwizzle_; // Tma swizzle, always Swizzle<B,M,S>
static_assert(is_static<TmaSwizzle>::value);
};
// Utility for unpacking TMA_LOAD arguments into a CopyOp
template <class CopyOp>
struct TMA_LOAD_Unpack
{
template <class... Args,
class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits<CopyOp, Args...> const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
auto src_coord = src.data().coord_;
if constexpr (detail::is_prefetch<CopyOp>) {
return detail::explode_tuple(detail::CallCOPY<CopyOp>{},
traits.opargs_, tuple_seq<decltype(traits.opargs_)>{},
src_coord, tuple_seq<decltype(src_coord)>{});
} else {
static_assert(is_smem<TD>::value, "SM90_TMA_LOAD requires the destination be shared memory.");
void* dst_ptr = cute::raw_pointer_cast(dst.data());
#if 0
auto [c0,c1,c2,c3,c4] = append<5>(src_coord, 0);
printf("THR (%d,%d,%d) BLK (%d,%d,%d) TMACRD (%d,%d,%d,%d,%d) SMEMADDR (%p)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
int32_t(c0), int32_t(c1), int32_t(c2), int32_t(c3), int32_t(c4), dst_ptr);
#endif
return detail::explode_tuple(detail::CallCOPY<CopyOp>{},
traits.opargs_, tuple_seq<decltype(traits.opargs_)>{},
make_tuple(dst_ptr), seq<0>{},
src_coord, tuple_seq<decltype(src_coord)>{});
}
}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_LOAD ///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_OP : SM90_TMA_LOAD {};
// The non-executable SM90_TMA_LOAD with tma_desc and no tma_mbar
// Use .with(tma_mbar) to construct an executable version
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_LOAD, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Construct an executable SM90_TMA_LOAD with tma_mbar
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_OP, NumBitsPerTMA>
with(uint64_t& tma_mbar, [[maybe_unused]] uint16_t const& multicast_mask = 0) const {
// We accept multicast_mask here to keep the API for both atoms consistent
return {{}, {&tma_desc_, &tma_mbar}};
}
// Construct an executable SM90_TMA_LOAD with tma_mbar (temp. overloaded for grouped gemm/ptr array gemm)
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_OP, NumBitsPerTMA>
with(TmaDescriptor const* new_tma_desc, uint64_t& tma_mbar, [[maybe_unused]] uint16_t const& multicast_mask = 0) const {
// We accept multicast_mask here to keep the API for both atoms consistent
return {{}, {new_tma_desc, &tma_mbar}};
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
// Don't try to execute a copy with SM90_TMA_LOAD before calling .with()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst) = delete;
};
// The executable SM90_TMA_LOAD with tma_desc and tma_mbar
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_TMA_LOAD_OP, NumBitsPerTMA>
: TMA_LOAD_Unpack<SM90_TMA_LOAD_OP>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD arguments
tuple<
TmaDescriptor const*,
uint64_t* // smem mbarrier
> const opargs_;
};
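// A hedged usage sketch (illustrative only; names are hypothetical): the non-executable
// SM90_TMA_LOAD traits are made executable by binding an smem mbarrier via .with(), which
// produces the SM90_TMA_LOAD_OP traits defined just above.
//
//   copy(tma_atom.with(smem_mbar), tAgA(_,k), tAsA(_,pipe));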
// The prefetch for SM90_TMA_LOAD with tma_desc
template <class NumBitsPerTMA, class... Args>
struct Copy_Traits<SM90_TMA_LOAD::PREFETCH, NumBitsPerTMA, Args...>
: TMA_LOAD_Unpack<SM90_TMA_LOAD::PREFETCH>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD::PREFETCH arguments
tuple<TmaDescriptor const*> const opargs_;
// Construct with any other Traits' TMA Desc
template <class... CopyArgs>
CUTE_HOST_DEVICE
Copy_Traits(Copy_Traits<CopyArgs...> const& traits)
: opargs_({&traits.tma_desc_}) {}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_LOAD_MULTICAST /////////////////////////////
//////////////////////////////////////////////////////////////////////////////
struct SM90_TMA_LOAD_MULTICAST_OP : SM90_TMA_LOAD_MULTICAST {};
// The non-executable SM90_TMA_LOAD_MULTICAST with tma_desc and no tma_mbar
// Use .with(tma_mbar, multicast_mask) to construct an executable version
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_LOAD_MULTICAST, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_MULTICAST arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Construct an executable SM90_TMA_LOAD_MULTICAST with tma_mbar
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_MULTICAST_OP, NumBitsPerTMA>
with(uint64_t& tma_load_mbar, uint16_t const& multicast_mask) const {
return {{}, {&tma_desc_, &tma_load_mbar, multicast_mask}};
}
// Construct an executable SM90_TMA_LOAD_MULTICAST_OP with tma_mbar (temp. overloaded for grouped gemm/ptr array gemm)
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_TMA_LOAD_MULTICAST_OP, NumBitsPerTMA>
with(TmaDescriptor const* new_tma_desc, uint64_t& tma_load_mbar, uint16_t const& multicast_mask) const {
return {{}, {new_tma_desc, &tma_load_mbar, multicast_mask}};
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
// Don't try to execute a copy with SM90_TMA_LOAD_MULTICAST before calling .with()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst) = delete;
};
// The executable SM90_TMA_LOAD_MULTICAST with tma_desc and tma_mbar and multicast_mask
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_TMA_LOAD_MULTICAST_OP, NumBitsPerTMA>
: TMA_LOAD_Unpack<SM90_TMA_LOAD_MULTICAST_OP>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_LOAD_MULTICAST arguments
tuple<
TmaDescriptor const*,
uint64_t*, // smem mbarrier
uint16_t // multicast mask
> const opargs_;
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_STORE //////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// The executable SM90_TMA_STORE with tma_desc
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_STORE, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_STORE arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_TMA_STORE");
//static_assert(is_gmem<TD>::value, "Expected gmem dst for SM90_TMA_STORE"); // TMA spoofed src tensor
void const* const desc_ptr = &(traits.tma_desc_);
void const* const src_ptr = cute::raw_pointer_cast(src.data());
auto dst_coord = dst.data().coord_;
#if 0
auto [c0,c1,c2,c3,c4] = append<5>(dst_coord, 0);
printf("THR (%d,%d,%d) BLK (%d,%d,%d) TMACRD (%d,%d,%d,%d,%d) SMEMADDR (%p)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
int32_t(c0), int32_t(c1), int32_t(c2), int32_t(c3), int32_t(c4), src_ptr);
#endif
return detail::explode_tuple(detail::CallCOPY<SM90_TMA_STORE>{},
make_tuple(desc_ptr, src_ptr), seq<0,1>{},
dst_coord, tuple_seq<decltype(dst_coord)>{});
}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// TMA_REDUCE_ADD //////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// The executable SM90_TMA_REDUCE_ADD with tma_desc
template <class NumBitsPerTMA, class AuxParams_>
struct Copy_Traits<SM90_TMA_REDUCE_ADD, NumBitsPerTMA, AuxParams_>
{
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_TMA_REDUCE_ADD arguments
TmaDescriptor tma_desc_;
using AuxParams = AuxParams_;
AuxParams aux_params_;
// Return TmaDescriptor/TensorMap
CUTE_HOST_DEVICE constexpr
TmaDescriptor const*
get_tma_descriptor() const {
return &tma_desc_;
}
// Generate the TMA coord tensor
template <class GShape>
CUTE_HOST_DEVICE constexpr
auto
get_tma_tensor(GShape const& g_shape) const {
static_assert(is_congruent<decltype(g_shape), decltype(aux_params_.g_stride_)>::value);
return make_counting_tensor(make_layout(g_shape, aux_params_.g_stride_));
}
template <class Coord, int... Is>
CUTE_HOST_DEVICE constexpr
void
copy_unpack_(void const* const src_ptr,
Coord const& dst_coord, seq<Is...>) const
{
#if 0
auto [c0,c1,c2,c3,c4] = append<5>(dst_coord, 0);
printf("THR (%d,%d,%d) BLK (%d,%d,%d) TMACRD (%d,%d,%d,%d,%d) SMEMADDR (%p)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
int32_t(c0), int32_t(c1), int32_t(c2), int32_t(c3), int32_t(c4), src_ptr);
#endif
SM90_TMA_REDUCE_ADD::copy(&tma_desc_,
src_ptr, get<Is>(dst_coord)...);
}
// This is the copy_unpack dispatch for this Copy_Traits
// Src needs to be a smem tensor
// Dst needs to be a gmem tensor with TmaCoordIterator .data()
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_TMA_REDUCE_ADD");
//static_assert(is_gmem<TD>::value, "Expected gmem dst for SM90_TMA_REDUCE_ADD"); // TMA spoofed src tensor
traits.copy_unpack_(cute::raw_pointer_cast(src.data()), dst.data().coord_, tuple_seq<decltype(dst.data().coord_)>{});
}
};
//////////////////////////////////////////////////////////////////////////////
///////////////////////////// BULK COPY //////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
template <class NumBitsPerTMA, class... OpArgs>
struct Copy_Traits<SM90_BULK_COPY_G2S, NumBitsPerTMA, OpArgs...>
{
static_assert(int32_t(NumBitsPerTMA::value / 8) % 16 == 0,
"Bulk Copy requires the copy vector size to be a multiple of 16B.");
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_BULK_COPY_G2S arguments
// 0: uint64_t* bulk_load_memory_barrier
cute::tuple<OpArgs...> bulk_load_mbar_;
// Record the memory barrier for the instruction
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_BULK_COPY_G2S, NumBitsPerTMA, uint64_t*>
with(uint64_t& bulk_mbar) const {
return {{&bulk_mbar}};
}
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_same<cute::tuple<OpArgs...>, cute::tuple<uint64_t*>>::value,
"Extra arguments not set. Set .with() before use.");
static_assert(is_gmem<TS>::value, "Expected gmem src for SM90_BULK_COPY_G2S");
static_assert(is_smem<TD>::value, "Expected smem dst for SM90_BULK_COPY_G2S");
SM90_BULK_COPY_G2S::copy(raw_pointer_cast(src.data()), get<0>(traits.bulk_load_mbar_),
raw_pointer_cast(dst.data()), int32_t(NumBitsPerTMA::value / 8));
}
};
template <class NumBitsPerTMA, class... Args>
struct Copy_Traits<SM90_BULK_COPY_G2S::PREFETCH, NumBitsPerTMA, Args...>
: Copy_Traits<SM90_BULK_COPY_G2S, NumBitsPerTMA>
{
template <class... CopyArgs>
CUTE_HOST_DEVICE
Copy_Traits(Copy_Traits<CopyArgs...> const& traits) {}
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem src for SM90_BULK_PREFETCH");
SM90_BULK_COPY_G2S::PREFETCH::copy(raw_pointer_cast(src.data()), int32_t(NumBitsPerTMA::value / 8));
}
};
template <class NumBitsPerTMA>
struct Copy_Traits<SM90_BULK_COPY_S2G, NumBitsPerTMA>
{
static_assert(int32_t(NumBitsPerTMA::value / 8) % 16 == 0,
"Bulk Copy requires the copy vector size to be a multiple of 16B.");
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_smem<TS>::value, "Expected smem src for SM90_BULK_COPY_S2G");
static_assert(is_gmem<TD>::value, "Expected gmem dst for SM90_BULK_COPY_S2G");
SM90_BULK_COPY_S2G::copy(raw_pointer_cast(src.data()), raw_pointer_cast(dst.data()), int32_t(NumBitsPerTMA::value / 8));
}
};
//
// Placeholder for the bulk copy algorithm's default, auto-vectorizing behavior
//
template <class... OpArgs>
struct Copy_Traits<SM90_BULK_COPY_AUTO, OpArgs...>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// SM90_UBULK_COPY arguments
// 0: uint64_t* bulk_load_memory_barrier [if this is a BULK_LOAD_G2S]
cute::tuple<OpArgs...> opargs_;
// Record the memory barrier for the instruction
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM90_BULK_COPY_AUTO, uint64_t*>
with(uint64_t& bulk_mbar) const {
return {{&bulk_mbar}};
}
};
//
// MAKE_TMA_COPY and related
//
namespace detail {
// Custom version of coalesce that greedily combines modes only up to size-256
// Look at each element and the back of the stack (in order of priority)
// back(NewLayout) get<I>(OldLayout)
// s0:d0 _1:d1 => continue
// _1:d0 s1:d1 => replace_back s1:d1
// s0:d0 s1:s0*d0 => replace_back s0*s1:d0 if s0*s1 <= 256
// s0:d0 s1:d1 => append s1:d1
//
// @pre OldShape and OldStride are flat
template <int I, class OldShape, class OldStride, class NewShape, class NewStride>
CUTE_HOST_DEVICE constexpr
auto
coalesce_256_impl(OldShape const& old_shape, OldStride const& old_stride,
NewShape const& new_shape, NewStride const& new_stride)
{
if constexpr (I == rank_v<OldShape>) {
// Base case, we're done
if constexpr (is_constant<1, NewShape>::value) {
return Layout<_1,_0>{};
} else {
return Layout<NewShape,NewStride>{new_shape,new_stride};
}
} else if constexpr (is_constant<1, decltype(get<I>(old_shape))>::value) {
// shape<I>(layout) == _1, skip it and continue
return coalesce_256_impl<I+1>(old_shape, old_stride, new_shape, new_stride);
} else if constexpr (is_constant<1, NewShape>::value) {
// Replace our shape-1 with anything (Can only happen on input new_shape/new_stride)
return coalesce_256_impl<I+1>(old_shape, old_stride, get<I>(old_shape), get<I>(old_stride));
} else if constexpr (is_constant<true, decltype(back(new_shape) * back(new_stride) == get<I>(old_stride) &&
get<I>(old_shape) * back(new_shape) <= Int<256>{})>::value) {
// Merge modes because the shapes and strides match and the merge is 256 or less
return coalesce_256_impl<I+1>(old_shape, old_stride,
replace_back(new_shape, get<I>(old_shape) * back(new_shape)),
new_stride);
} else {
// Can't replace or merge, so append a new mode
return coalesce_256_impl<I+1>(old_shape, old_stride,
append(new_shape, get<I>(old_shape)),
append(new_stride, get<I>(old_stride)));
}
CUTE_GCC_UNREACHABLE;
}
// Combine all the modes that are possible to combine
// Does not respect the profile of the layout, but does preserve total size
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coalesce_256(Layout<Shape,Stride> const& layout)
{
auto flat_shape = flatten(layout.shape());
auto flat_stride = flatten(layout.stride());
return coalesce_256_impl<1>(flat_shape, flat_stride, get<0>(flat_shape), get<0>(flat_stride));
}
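// A worked example (illustrative only, assuming the flat layout below):
//   input : (_16,_32,_2):(_1,_16,_512)
//   I = 1 : strides chain (16*1 == 16) but 32*16 = 512 > 256, so _32:_16 is appended, not merged
//   I = 2 : strides chain (32*16 == 512) and 2*32 = 64 <= 256, so the back mode becomes _64:_16
//   result: (_16,_64):(_1,_16)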
template <class TmaInternalType,
class GEngine, class GLayout,
class SShape, class SStride,
class VShape, class VStride>
CUTE_HOST_DEVICE constexpr
auto
construct_tma_gbasis(Tensor<GEngine,GLayout> const& gtensor, // The original GMEM Tensor
Layout<SShape,SStride> const& slayout, // The layout of SMEM
Layout<VShape,VStride> const& cta_v_map) // smem_idx to hier gmode
{
//
// TMA parameter checking
//
CUTE_STATIC_ASSERT_V(product_each(shape(slayout)) == product_each(shape(cta_v_map)),
"TMA requires CTA_Tile and SLayout top-level shape equivalence.");
#if 0
print("gtensor : "); print(gtensor); print("\n");
print("slayout : "); print(slayout); print("\n");
print("cta_v_map : "); print(cta_v_map); print("\n");
#endif
//
// TMA slayout manipulation
//
// Invert the smem to get the largest contiguous vector in the smem layout
// smem idx -> smem coord
auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout));
// Compose with the V-Map to convert smem coord (CTA val idx) to gmem mode
// smem idx -> gmem mode
auto sidx2gmode_full = coalesce(composition(cta_v_map, inv_smem_layout));
#if 0
print("inv_smem_layout : "); print(inv_smem_layout); print("\n");
print("sidx2gmode_full : "); print(sidx2gmode_full); print("\n");
#endif
//
// TMA gtensor truncation
//
// Truncate any incompatibilities -- no starting in the middle of gmodes
auto smem_rank = find_if(stride(sidx2gmode_full), [](auto e) {
[[maybe_unused]] auto v = basis_value(e);
return not is_constant<1,decltype(v)>{};
});
static_assert(smem_rank > 0, "Could not find a common tile-gmem vectorization. Does the Tile select out major GMEM modes?");
// Keep only the static-1 basis modes into gmem
auto sidx2gmode = take<0,smem_rank>(sidx2gmode_full);
#if 0
print("smem_rank : "); print(smem_rank); print("\n");
print("sidx2gmode : "); print(sidx2gmode); print("\n");
#endif
//
// TMA gtensor manipulation
//
// The smem vector is in the same units as gtensor, so compose first and then recast
// tma_val_idx:gmem_strides
auto tile_gstride = recast<TmaInternalType>(gtensor.compose(sidx2gmode)).layout();
// Coalesce modes up to size-256 (the maximum TMA box extent in units of TmaInternalType)
// tma_box_shape:gmem_strides
auto tma_gstride = coalesce_256(tile_gstride);
// Perform the tiling, recast, and coalesce to the gmem vector again, but with indirections to the gtensor modes
auto gbasis = make_identity_layout(shape(gtensor));
auto tile_gbasis_tmp = gbasis.compose(sidx2gmode);
// Instead of the recast (gbasis doesn't have type info), replace the shape with the already-recasted shape
// tma_box_shape:gmem_mode
auto tile_gbasis = make_layout(shape(tile_gstride), stride(tile_gbasis_tmp));
// "Coalesce" the tile basis into a compatible shape with the tma_gstride
auto tma_gbasis_tile = tile_gbasis.compose(make_layout(wrap(shape(tma_gstride))));
// Recast the original tensor for shape/stride inspections
Tensor gtensor_T = recast<TmaInternalType>(gtensor);
// Find missing bases that don't appear in tile_gbasis
auto tile_gbasis_remaining_stride = filter_tuple(flatten(shape (gtensor_T)), flatten(stride(gtensor_T)),
flatten(stride(gbasis)),
[&](auto s, auto d, auto e)
{
if constexpr (is_constant<1, decltype(s)>::value || is_constant<0, decltype(d)>::value) {
return cute::tuple<>{}; // If size-1 or stride-0, then don't append
} else {
using E = decltype(e);
auto has_e = any_of(flatten(stride(tma_gbasis_tile)), [] (auto tb) { return tb == E{}; });
if constexpr (decltype(has_e)::value) {
return cute::tuple<>{}; // If d was found, then don't append
} else {
return cute::tuple<E>(e); // Else, this is missing so append
}
}
});
// Append the remaining basis modes that contribute to the TMA with size-1
auto tile_gbasis_remaining_shape = repeat<rank(tile_gbasis_remaining_stride)>(Int<1>{});
auto tma_gbasis_full = make_layout(tuple_cat(wrap( shape(tma_gbasis_tile)), wrap(tile_gbasis_remaining_shape )),
tuple_cat(wrap(stride(tma_gbasis_tile)), wrap(tile_gbasis_remaining_stride)));
// Group the trailing modes to make this max rank-5 -- TMA rank limitation
// tma_box_shape:gmem_mode
auto tma_gbasis = group<cute::min(rank(tma_gbasis_full),4),-1>(tma_gbasis_full);
#if 0
print("tile_gstride : "); print(tile_gstride); print("\n");
print("tma_gstride : "); print(tma_gstride); print("\n");
print("gbasis : "); print(gbasis); print("\n");
print("tile_gbasis : "); print(tma_gbasis_tile); print("\n");
print("tma_gbasis : "); print(tma_gbasis); print("\n");
#endif
return tma_gbasis;
}
template <class GEngine, class GLayout,
class TmaGmemBasisStride,
class ShapeT, size_t TmaRank>
CUTE_HOST_DEVICE constexpr
void
fill_tma_gmem_shape_stride(Tensor<GEngine,GLayout> const& gtensor, // Gmem Shapes and Strides, in units of TmaInternalType
TmaGmemBasisStride const& tma_gbasis_stride, // Map Tma mode idx -> Gmem mode(s)
cute::array<ShapeT, TmaRank> & gmem_prob_shape, // Tma Shapes, uint32_t or uint64_t
cute::array<uint64_t, TmaRank> & gmem_prob_stride) // Tma Strides
{
static_assert(is_tuple<TmaGmemBasisStride>::value);
static_assert(is_same<uint32_t, ShapeT>::value || is_same<uint64_t, ShapeT>::value);
using TmaInternalType = typename GEngine::value_type;
constexpr int tma_rank = decltype(rank(tma_gbasis_stride))::value;
static_assert(TmaRank >= tma_rank);
auto gmem_shape = shape(gtensor);
auto gmem_stride = stride(gtensor);
// Use the indirections in tma_gbasis_stride into gtensor to construct the tma gmem shapes/strides
for_each(make_seq<tma_rank>{}, [&](auto i) {
constexpr int tma_i_rank = decltype(rank<i>(tma_gbasis_stride))::value;
if constexpr (tma_i_rank == 1) {
// Trivial contribution of this gmem mode to this tma mode
auto ej = unwrap(get<i>(tma_gbasis_stride));
gmem_prob_shape[i] = basis_get(ej, gmem_shape);
gmem_prob_stride[i] = basis_get(ej, gmem_stride);
} else {
// Apply a recurrence to each gmem mode that contributes to this tma mode
for_each(get<i>(tma_gbasis_stride), [&](auto ej) {
// Problem shape
uint64_t shape_j = basis_get(ej, gmem_shape);
// Problem stride (in bytes)
uint64_t stride_j = basis_get(ej, gmem_stride);
uint64_t old_stride = gmem_prob_stride[i];
gmem_prob_stride[i] = gcd(gmem_prob_stride[i], stride_j);
if (gmem_prob_stride[i] != 0) {
// Recurrence: g_shape = (s_i - 1) * (d_i / gcd_j d_j) + 1
gmem_prob_shape[i] = (gmem_prob_shape[i]-1) * (old_stride / gmem_prob_stride[i])
+ (shape_j-1) * (stride_j / gmem_prob_stride[i])
+ 1;
} else {
gmem_prob_shape[i] = shape_j;
}
});
}
});
}
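// A worked example (illustrative only): folding two gmem modes (8,4):(1,8) into one tma mode.
//   first mode : gmem_prob_shape = 8, gmem_prob_stride = 1
//   second mode: gcd(1,8) = 1, shape = (8-1)*(1/1) + (4-1)*(8/1) + 1 = 32
// i.e. the two contiguous modes collapse into a single TMA extent of 32 with unit stride.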
// Overload for an existing Copy_Traits
template <class GEngine, class GLayout,
class Op, class Bits, class Aux,
class ShapeT, size_t TmaRank>
CUTE_HOST_DEVICE constexpr
void
fill_tma_gmem_shape_stride(Copy_Traits<Op,Bits,Aux> const& tma_traits,
Tensor<GEngine,GLayout> const& gtensor, // Gmem Shapes and Strides, value_type = TmaInternalType
cute::array<ShapeT, TmaRank> & gmem_prob_shape, // Tma Shapes, uint32_t or uint64_t
cute::array<uint64_t, TmaRank> & gmem_prob_stride) // Tma Strides
{
return fill_tma_gmem_shape_stride(gtensor, stride(typename Aux::TmaGmemBasis{}),
gmem_prob_shape, gmem_prob_stride);
}
// Use a sidx2gmode to read through the GMEM tensor
// and construct a TMA Descriptor for the resulting instruction
// At the same time, construct the Tma Tensor's Stride to generate
// the TMA coordinates that the instruction consumes.
//
template <class TmaInternalType,
class GEngine, class GLayout,
class TShape, class TStride,
int B, int M, int S>
CUTE_HOST_RTC
auto
make_tma_copy_desc(Tensor<GEngine,GLayout> const& gtensor, // The original GMEM Tensor
Layout<TShape,TStride> const& tma_gbasis, // TMA mode -> GMEM mode mapping
Swizzle<B,M,S> const& swizzle, // Swizzle fn on smem_idx
uint32_t num_multicast) // The number of CTAs in multicasting
{
//
// TMA desc creation
//
constexpr int tma_dim = decltype(rank(tma_gbasis))::value;
//
// TMA gmem desc info
//
// Recast the original tensor for shape/stride inspections
Tensor gtensor_T = recast<TmaInternalType>(gtensor);
void* gmem_address = (void*) raw_pointer_cast(gtensor_T.data());
auto gmem_layout = gtensor_T.layout();
cute::array<uint64_t, 5> gmem_prob_shape = {1,1,1,1,1};
cute::array<uint64_t, 5> gmem_prob_stride = {0,0,0,0,0};
fill_tma_gmem_shape_stride(gtensor_T, stride(tma_gbasis), gmem_prob_shape, gmem_prob_stride);
assert((reinterpret_cast<uint64_t>(gmem_address) & 0b1111) == 0); // Address must be 16B-aligned
assert(gmem_prob_shape[0] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[0] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[1] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[1] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[2] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[2] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[3] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[3] <= (uint64_t(1) << 32)); // Size must be max 2^32
assert(gmem_prob_shape[4] >= (uint64_t(1))); // Size must be min 1
assert(gmem_prob_shape[4] <= (uint64_t(1) << 32)); // Size must be max 2^32
// TMA descriptor does not store the zeroth stride and assumes it is 1 (TmaInternalType element).
assert(gmem_prob_stride[0] == 1 && "Majorness of smem doesn't match majorness of gmem");
// convert strides to byte strides
for(uint64_t& stride : gmem_prob_stride) {
stride = (stride * sizeof_bits_v<TmaInternalType>) / 8;
}
// Assert the byte strides. Tma Descriptor uses byte strides
assert((gmem_prob_stride[1]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[1] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
assert((gmem_prob_stride[2]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[2] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
assert((gmem_prob_stride[3]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[3] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
assert((gmem_prob_stride[4]) < (uint64_t(1) << 40)); // Stride must be max 2^40
assert((gmem_prob_stride[4] & 0b1111) == 0); // Stride must be multiple of 16B (128b)
//
// TMA smem desc info
//
cute::array<uint32_t, 5> smem_box_shape = {1,1,1,1,1};
cute::array<uint32_t, 5> smem_box_stride = {1,1,1,1,1};
// The smem box is simply given by the sizes of the modes in tma_gbasis
for_each(make_seq<tma_dim>{}, [&](auto i) {
smem_box_shape[i] *= size<i>(tma_gbasis);
});
// Finally, truncate the tma box by the num_multicast
for (uint32_t i = tma_dim-1, multicast = num_multicast; multicast > 1; --i) {
assert(smem_box_shape[i] % multicast == 0 || multicast % smem_box_shape[i] == 0);
uint32_t new_mult = ceil_div(multicast, smem_box_shape[i]);
smem_box_shape[i] = ceil_div(smem_box_shape[i], multicast);
multicast = new_mult;
}
assert(smem_box_shape[0] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[0] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[1] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[1] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[2] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[2] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[3] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[3] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_shape[4] >= (uint32_t(1))); // Size must be min 1
assert(smem_box_shape[4] <= (uint32_t(1) << 8)); // Size must be max 2^8 = 256
assert(smem_box_stride[0] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[0] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[1] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[1] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[2] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[2] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[3] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[3] <= (uint32_t(8))); // Stride must be max 2^3 = 8
assert(smem_box_stride[4] >= (uint32_t(1))); // Stride must be min 1
assert(smem_box_stride[4] <= (uint32_t(8))); // Stride must be max 2^3 = 8
//
// Construct the descriptor
//
TmaDescriptor tma_desc{};
//
// TMA general info
//
#if (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__)
CUtensorMapDataType tma_format = TMA::to_CUtensorMapDataType<TmaInternalType>();
CUtensorMapInterleave tma_interleave = CU_TENSOR_MAP_INTERLEAVE_NONE;
CUtensorMapL2promotion tma_l2Promotion = CU_TENSOR_MAP_L2_PROMOTION_L2_128B;
CUtensorMapFloatOOBfill tma_oobFill = CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE;
// TMA smem swizzle type
CUtensorMapSwizzle smem_swizzle = TMA::to_CUtensorMapSwizzle(get_tma_swizzle_bits(swizzle));
CUresult result = cuTensorMapEncodeTiled(
&tma_desc,
tma_format,
tma_dim,
gmem_address,
gmem_prob_shape.data(),
gmem_prob_stride.data() + 1, // gmem_prob_stride[0] implicitly 1
smem_box_shape.data(),
smem_box_stride.data(),
tma_interleave,
smem_swizzle,
tma_l2Promotion,
tma_oobFill);
if (result != CUDA_SUCCESS) {
std::cerr << "TMA Desc Addr: " << &tma_desc
<< "\nformat " << tma_format
<< "\ndim " << tma_dim
<< "\ngmem_address " << gmem_address
<< "\nglobalDim " << gmem_prob_shape
<< "\nglobalStrides " << gmem_prob_stride
<< "\nboxDim " << smem_box_shape
<< "\nelementStrides " << smem_box_stride
<< "\ninterleave " << tma_interleave
<< "\nswizzle " << smem_swizzle
<< "\nl2Promotion " << tma_l2Promotion
<< "\noobFill " << tma_oobFill << std::endl;
std::cerr << "Error: Failed to initialize the TMA descriptor " << result << std::endl;
assert(false);
}
#endif // (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__)
auto recast_ratio = cute::trait_ratio(sizeof_bits<typename GEngine::value_type>{},
sizeof_bits< TmaInternalType>{});
auto gbasis = make_basis_like(shape(gtensor));
// Finally, get the inverse permutation of the E<i> bases for the mocked gmem stride
auto gmem_tma_basis_stride = transform_leaf(gbasis, [&](auto ei) {
auto si = basis_get(ei, shape(gmem_layout));
auto di = basis_get(ei, stride(gmem_layout));
if constexpr (is_constant<1, decltype(si)>::value || is_constant<0, decltype(di)>::value) {
return Int<0>{}; // If size-1 or stride-0, return arithmetic identity -- no contribution to the TMA
} else {
auto tma_gmem_basis_stride = stride(tma_gbasis);
// Find j such that E<i> is in stride<j>(tma_gbasis)
using EI = decltype(ei);
[[maybe_unused]] auto j = find_if(tma_gmem_basis_stride, [&](auto tma_stride_j) { return any_of(tma_stride_j, [&](auto dj) { return dj == EI{}; }); });
if constexpr (decltype(j == rank(tma_gmem_basis_stride))::value) {
return Int<0>{}; // If not-found, return arithmetic identity -- no contribution to the TMA
} else
if constexpr (decltype(j == Int<0>{})::value) {
auto scale = recast_ratio * basis_get(ei, stride(gtensor));
return E<j>{} * scale; // Return TMA Coord basis -- with a recast scale factor
} else
if constexpr (decltype(rank<j>(tma_gmem_basis_stride) == Int<1>{})::value) {
return E<j>{}; // Return TMA Coord basis -- known scale of Int<1>{}
} else {
int32_t scale = ceil_div(int32_t(di * sizeof_bits_v<TmaInternalType> / cute::max(gmem_prob_stride[j], uint64_t{16})), 8);
return E<j>{} * scale; // Return TMA Coord basis -- with a dynamic scale factor
}
}
});
#if 0
print("gmem_tma_basis_stride : "); print(gmem_tma_basis_stride); print("\n");
#endif
using AuxParams = AuxTmaParams<decltype(gmem_tma_basis_stride),
decltype(tma_gbasis),
decltype(swizzle)>;
return cute::make_tuple(tma_desc, AuxParams{gmem_tma_basis_stride});
}
template <class TmaInternalType,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class VShape, class VStride>
CUTE_HOST_RTC
auto
make_tma_copy_atom(CopyOp,
Tensor<GEngine,GLayout> const& gtensor, // Full GMEM Tensor
SLayout const& slayout, // CTA Tile of SMEM, potentially swizzled
uint32_t const& num_multicast, // The number of CTAs involved in multicasting
Layout<VShape,VStride> const& cta_v_map) // V: CTA val idx -> gmem mode
{
//
// TMA truncated layout
//
auto smem_swizzle = get_swizzle_portion(slayout);
auto smem_layout = get_nonswizzle_portion(slayout);
auto tma_gbasis = detail::construct_tma_gbasis<TmaInternalType>(gtensor, smem_layout, cta_v_map);
//
// Construct the TMA Desc and the strides of the TMA Tensor
//
auto [tma_desc, aux_params] = detail::make_tma_copy_desc<TmaInternalType>(gtensor,
tma_gbasis,
smem_swizzle,
num_multicast);
//
// Construct the Copy_Traits
//
constexpr int num_bits_per_tma = size(tma_gbasis) * sizeof_bits_v<TmaInternalType>;
using Traits = Copy_Traits<CopyOp, cute::C<num_bits_per_tma>, decltype(aux_params)>;
using Atom = Copy_Atom<Traits, typename GEngine::value_type>;
Traits tma_traits{tma_desc, aux_params};
#if 0
print("num_bits_per_tma : "); print(num_bits_per_tma); print("\n");
print("g_stride_bases : "); print(tma_traits.aux_params_.g_stride_); print("\n");
#endif
// Return the Copy_Atom
return Atom{tma_traits};
}
// The "logical TMA tid" is a map from the CTA rank to its logical id
// within the instruction. It works like a mask or ordering on the
// CTAs. For non-multicast TMA, all CTAs should map to 0. For
// multicast TMA of size 4, CTAs will be mapped to {0,1,2,3}.
template <class TmaInternalType,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class TShape, class TStride,
class VShape, class VStride>
CUTE_HOST_RTC
auto
make_tma_copy_tiled(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor, // Full GMEM Tensor
SLayout const& slayout, // CTA Tile of SMEM
Layout<TShape,TStride> const& cta_t_map, // T: CTA thr idx -> logical TMA tid
Layout<VShape,VStride> const& cta_v_map) // V: CTA val idx -> gmem mode
{
Copy_Atom atom = make_tma_copy_atom<TmaInternalType>(copy_op, gtensor, slayout,
cosize(cta_t_map), cta_v_map);
//
// Construct the TiledCopy
//
[[maybe_unused]] auto cta_tiler = product_each(shape(cta_v_map));
auto num_elems_per_tma = size<1>(typename decltype(atom)::RefLayout{}) / static_value<sizeof_bits<typename GEngine::value_type>>();
// smem idx -> smem coord
auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout));
// CTA V -> smem_coord
auto layout_v = composition(inv_smem_layout, num_elems_per_tma);
// Scale that up to cover all of the smem_coords
auto layout_V = tile_to_shape(make_layout(layout_v), size(cta_v_map));
// CTA T -> smem idx
auto layout_t = make_layout(cosize(cta_t_map), shape_div(num_elems_per_tma, cosize(cta_t_map)));
// CTA TID -> smem coord
auto layout_T = composition(inv_smem_layout, composition(layout_t, cta_t_map));
// Combine with the T mapping
[[maybe_unused]] auto layout_TV = make_layout(layout_T, layout_V);
#if 0
print("cta_tiler : "); print(cta_tiler); print("\n");
print("layout_v : "); print(layout_v); print("\n");
print("layout_V : "); print(layout_V); print("\n");
print("layout_t : "); print(layout_t); print("\n");
print("layout_T : "); print(layout_T); print("\n");
print("layout_TV : "); print(layout_TV); print("\n");
#endif
return TiledCopy<decltype(atom), decltype(layout_TV), decltype(cta_tiler)>{atom};
}
} // end namespace detail
/** Make a CuTe CTA-collective TiledCopy for a TMA operation.
*
* @param CopyOp The target copy operation: SM90_TMA_LOAD, SM90_TMA_LOAD_MULTICAST, SM90_TMA_STORE
* @param gtensor The GMEM Tensor to be involved in the TMA.
* @param slayout The SMEM Layout to be involved in the TMA.
* @param cta_tile The CTA-local tile that each CTA will be tiling GMEM with.
* This is often the blk_shape that is used to tile the GMEM for CTAs:
* local_tile(gtensor, blk_shape, blk_coord) -> CTA-local tile of gtensor
* @param cluster_size When using SM90_TMA_LOAD_MULTICAST, this can be a (static) power-of-2 <= 16
* defining the multicast size (used to further partition the SMEM)
* Else, static-1
*
* This code attempts to maximize the TMA box size. It does this by tracing
* the SMEM "vector" -- the inverse of the smem layout -- to find the largest
* contiguous array of smem that can be written to/from global memory given
* the constraints that the TMA instruction imposes.
*
* This is accomplished by assigning "basis" strides to the GMEM to track which
* modes of SMEM map to which modes of GMEM, then reorder the modes of GMEM according
* to the SMEM vector, and then using those GMEM/SMEM modes to fill in the desc.
*
* Examples:
using T = float;
T* gptr = nullptr;
{
// Simple 2D
Tensor gtensor = make_tensor(gptr, make_shape(1024, 256), GenRowMajor{}); // K-Major GMEM
auto slayout = make_layout(make_shape(_64{}, _32{}), GenRowMajor{}); // K-Major SMEM
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout);
}
{
// GMMA 2D
Tensor gtensor = make_tensor(gptr, make_shape(1024, 256)); // MN-Major GMEM
auto slayout = tile_to_shape(GMMA::Layout_MN_SW128_Atom<T>{}, make_shape(_128{},_64{})); // MN-Major Swizzled+Tiled 128x64 SMEM
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout);
}
{
// 3D
Tensor gtensor = make_tensor(gptr, make_shape(1024, 32, 512), make_stride(64, Int<1>{}, 65536)); // GMEM
auto slayout = make_layout(make_shape(_16{}, _8{}, _2{}), make_stride(_16{}, _1{}, _8{})); // SMEM w/ same major-mode
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout);
}
{
// cuTENSOR 4D
Tensor gtensor = make_tensor(gptr, make_shape(make_shape(32,40),make_shape(make_shape(8,8),656))); // Col-Major GMEM
auto cta_tile = make_shape(_128{},make_shape(_32{},_2{})); // GMEM Tiling:
// Take 128-elem from m: m0 must divide 128,
// m-last may be predicated
// Take 32-elem from k0, 2-elem from k1
auto slayout = make_layout(cta_tile); // Col-Major SMEM
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gtensor, slayout, cta_tile, Int<1>{});
}
*
* Check the TMA box size and desc:
print("TMA Box size: "); print(typename decltype(tma)::Tiler_MN{}); print("\n");
print("TMA desc : "); print(tma.tma_desc_); print("\n");
*
* Usage:
Tensor mA = tma_a.get_tma_tensor(make_shape(M,N)); // (M,N) TMA coord tensor
Tensor gA = local_tile(mA, cta_tile, cta_coord); // (BLK_M,BLK_N) TMA coord tensor for this CTA
Tensor sA = make_tensor(make_smem_ptr<T>(sptr), slayout); // (BLK_M,BLK_N) SMEM tensor
auto cta_tma = tma.get_slice(cta_idx_in_cluster); // Slice for multicast partitioning
Tensor tAgA = cta_tma.partition_S(gA); // Partition for src
Tensor tAsA = cta_tma.partition_D(sA); // Partition for dst
copy(tma.with(barrier, mcast_mask), tAgA, tAsA); // copy with supporting TMA params
*/
template <class TmaInternalType = void,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class CTA_Tiler,
class Cluster_Size>
CUTE_HOST_RTC
auto
make_tma_copy(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
CTA_Tiler const& cta_tiler,
Cluster_Size const& cluster_size)
{
if constexpr (cute::is_same_v<CopyOp, SM90_TMA_LOAD_IM2COL> ||
cute::is_same_v<CopyOp, SM90_TMA_STORE_IM2COL>) {
return make_im2col_tma_copy(copy_op,
gtensor,
slayout,
cta_tiler,
cluster_size);
} else {
auto cta_v_tile = make_identity_layout(shape(gtensor)).compose(cta_tiler);
auto cta_t_tile = make_layout(cluster_size);
// Prefer TmaInternalType if specified. Fallback to GEngine::value_type
using TmaType = conditional_t<is_same<void, TmaInternalType>::value, typename GEngine::value_type, TmaInternalType>;
return detail::make_tma_copy_tiled<TmaType>(copy_op,
gtensor, slayout,
cta_t_tile, cta_v_tile);
}
}
// Explicit defaulting
template <class CopyOp,
class GEngine, class GLayout,
class SLayout>
CUTE_HOST_RTC
auto
make_tma_copy(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout)
{
return make_tma_copy(copy_op, gtensor, slayout, product_each(shape(slayout)), Int<1>{});
}
// Explicit defaulting
template <class CopyOp,
class GEngine, class GLayout,
class SLayout,
class Cluster_Size>
CUTE_HOST_RTC
auto
make_tma_copy(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
Cluster_Size const& cluster_size)
{
return make_tma_copy(copy_op, gtensor, slayout, product_each(shape(slayout)), cluster_size);
}
////////////////////////////////////
// Experimental Make TMA Atom and Partitioner
///////////////////////////////////
template <class TmaInternalType = void,
class CopyOp,
class GEngine, class GLayout,
class SLayout,
class CTA_Tiler,
class Cluster_Size>
CUTE_HOST_RTC
auto
make_tma_atom(CopyOp const& copy_op,
Tensor<GEngine,GLayout> const& gtensor,
SLayout const& slayout,
CTA_Tiler const& cta_tiler,
Cluster_Size const& cluster_size)
{
auto cta_v_tile = make_identity_layout(shape(gtensor)).compose(cta_tiler);
// Prefer TmaInternalType if specified. Fallback to GEngine::value_type
using TmaType = conditional_t<is_same<void, TmaInternalType>::value, typename GEngine::value_type, TmaInternalType>;
return detail::make_tma_copy_atom<TmaType>(copy_op,
gtensor, slayout,
size(cluster_size), cta_v_tile);
}
// The "VectorCopy Partitioner" for TMA
template <class... Args,
class CtaCoord,
class TShape, class TStride,
class SEngine, class SLayout,
class GEngine, class GLayout>
CUTE_DEVICE
auto
tma_partition(Copy_Atom<Args...> const& copy_atom,
CtaCoord const& cta_coord,
Layout<TShape,TStride> const& cta_layout, // T: CTA coord -> logical multicast id
Tensor<SEngine,SLayout> const& stensor, // SMEM Tensor (TMATile, Rest...)
Tensor<GEngine,GLayout> const& gtensor) // GMEM Tensor (TMATile, Rest...)
{
CUTE_STATIC_ASSERT_V(size<0>(stensor) == size<0>(gtensor));
// Invert the smem to get the largest contiguous vector in the smem layout
Layout inv_smem_layout = right_inverse(get_nonswizzle_portion(layout<0>(stensor)));
// Scale that up to cover all of the smem_coords
Layout layout_v = tile_to_shape(make_layout(inv_smem_layout), size<0>(stensor));
// Factor out the single-instruction portion
Layout tma_layout_v = make_layout(Int<Copy_Atom<Args...>::NumValSrc>{});
auto layout_V = make_tile(logical_divide(layout_v, tma_layout_v));
// Append with _ until we cover all Rest... modes
auto glayout_V = append<rank_v<decltype(gtensor)>>(layout_V, _);
auto slayout_V = append<rank_v<decltype(stensor)>>(layout_V, _);
// Transform tile mode and coalesce
Tensor gtensor_v = coalesce(gtensor.compose(glayout_V), Shape<Shape<_1,_1>>{}); // ((TMA,TMA_Iter), Rest...)
Tensor stensor_v = coalesce(stensor.compose(slayout_V), Shape<Shape<_1,_1>>{}); // ((TMA,TMA_Iter), Rest...)
#if 0
if (thread0()) {
print("cta_coord : "); print(cta_coord); print("\n");
print("cta_layout : "); print(cta_layout); print("\n");
print("gtensor : "); print(gtensor); print("\n");
print("stensor : "); print(stensor); print("\n");
print("layout_V : "); print(layout_V); print("\n");
print("gtensor_v : "); print(gtensor_v); print("\n");
print("stensor_v : "); print(stensor_v); print("\n");
}
#endif
// Offset inside the TMA-mode for the multicast
auto multicast_offset = cta_layout(cta_coord) * (size(tma_layout_v) / cosize(cta_layout));
auto multicast_coord = make_coord(make_coord(multicast_offset, Int<0>{}));
auto scoord = append<SLayout::rank>(multicast_coord, Int<0>{});
auto gcoord = append<GLayout::rank>(multicast_coord, Int<0>{});
Tensor gresult = domain_offset(gcoord, gtensor_v);
Tensor sresult = domain_offset(scoord, stensor_v);
return cute::make_tuple(gresult, sresult);
}
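// A hedged usage sketch (illustrative only; names are hypothetical): sA and gA are assumed to be
// pre-shaped as (TMATile, Rest...) as documented above; the returned pair is (gmem, smem).
//
//   auto [tAgA, tAsA] = tma_partition(tma_atom, cta_rank_in_cluster, Layout<_2>{}, sA, gA);
//   copy(tma_atom.with(mbar, mcast_mask), tAgA, tAsA);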
// TMA Multicast Masks Calculation
template <int Mode, class CtaLayout, class CtaCoord>
CUTE_HOST_DEVICE constexpr
auto
create_tma_multicast_mask(CtaLayout const& cta_layout_vmnk,
CtaCoord const& cta_coord_vmnk)
{
auto cta_coord_slicer = replace<Mode>(cta_coord_vmnk, _);
auto [cta_layout, elected_cta] = slice_and_offset(cta_coord_slicer, cta_layout_vmnk);
// Build the multicast mask: set one bit for each CTA participating in this slice
uint16_t mcast_mask = 0;
for (int i = 0; i < size(cta_layout); ++i) {
mcast_mask |= uint16_t(1) << cta_layout(i);
}
// Shift by the instruction's elected block rank (dynamic)
mcast_mask <<= elected_cta;
return mcast_mask;
}
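// A hedged example (illustrative only): for a cluster layout of shape (V,M,N,K) and a CTA's
// (v,m,n,k) coordinate, create_tma_multicast_mask<2>(cta_layout_vmnk, cta_coord_vmnk) sets one
// bit per CTA that shares this CTA's slice along the N mode, yielding the 16-bit multicast_mask
// expected by the SM90_TMA_LOAD_MULTICAST traits' .with() above.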
} // end namespace cute
| include/cute/atom/copy_traits_sm90_tma.hpp/0 | {
"file_path": "include/cute/atom/copy_traits_sm90_tma.hpp",
"repo_id": "include",
"token_count": 25399
} | 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector_types.h>
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
//
// dim3
//
using dim3 = ::dim3;
// MSVC doesn't define its C++ version macro to match
// its C++ language version. This means that when
// building with MSVC, dim3 isn't constexpr-friendly.
template <size_t I>
CUTE_HOST_DEVICE
#if ! defined(_MSC_VER)
constexpr
#endif
uint32_t& get(dim3& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE
#if ! defined(_MSC_VER)
constexpr
#endif
uint32_t const& get(dim3 const& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE
#if ! defined(_MSC_VER)
constexpr
#endif
uint32_t&& get(dim3&& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return cute::move(a.x);
} else if constexpr (I == 1) {
return cute::move(a.y);
} else if constexpr (I == 2) {
return cute::move(a.z);
}
CUTE_GCC_UNREACHABLE;
}
// Specialize cute::tuple-traits for external types
template <>
struct tuple_size<dim3>
: integral_constant<size_t, 3>
{};
template <size_t I>
struct tuple_element<I, dim3>
{
using type = uint32_t;
};
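// Usage sketch (illustrative): with the accessors and trait specializations above, a dim3
// can be manipulated like a cute tuple of three uint32_t values:
//
//   cute::dim3 grid(128, 2, 1);
//   uint32_t gx = cute::get<0>(grid);                              // same as grid.x
//   static_assert(cute::tuple_size<cute::dim3>::value == 3, "");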
//
// uint3
//
using uint3 = ::uint3;
template <size_t I>
CUTE_HOST_DEVICE constexpr
uint32_t& get(uint3& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE constexpr
uint32_t const& get(uint3 const& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return a.x;
} else if constexpr (I == 1) {
return a.y;
} else if constexpr (I == 2) {
return a.z;
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I>
CUTE_HOST_DEVICE constexpr
uint32_t&& get(uint3&& a)
{
static_assert(I < 3, "Index out of range");
if constexpr (I == 0) {
return cute::move(a.x);
} else if constexpr (I == 1) {
return cute::move(a.y);
} else if constexpr (I == 2) {
return cute::move(a.z);
}
CUTE_GCC_UNREACHABLE;
}
// Specialize cute::tuple-traits for external types
template <>
struct tuple_size<uint3>
: integral_constant<size_t, 3>
{};
template <size_t I>
struct tuple_element<I, uint3>
{
using type = uint32_t;
};
} // end namespace cute
| include/cute/container/cuda_types.hpp/0 | {
"file_path": "include/cute/container/cuda_types.hpp",
"repo_id": "include",
"token_count": 1651
} | 21 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/numeric_types.hpp> // sizeof_bits
namespace cute
{
//
// C++20 <iterator> iterator_traits
//
namespace detail {
// Default reference type of an iterator
template <class T, class = void>
struct iter_ref { using type = decltype(*declval<T&>()); };
// Prefer to propagate ::reference
template <class T>
struct iter_ref<T,void_t<typename T::reference>> { using type = typename T::reference; };
} // end namespace detail
template <class T>
using iter_reference = detail::iter_ref<T>;
template <class T>
using iter_reference_t = typename iter_reference<T>::type;
namespace detail {
// Default element_type of an iterator
template <class T, class = void>
struct iter_e { using type = remove_reference_t<typename iter_ref<T>::type>; };
// Prefer to propagate ::element_type
template <class T>
struct iter_e<T,void_t<typename T::element_type>> { using type = typename T::element_type; };
} // end namespace detail
template <class T>
using iter_element = detail::iter_e<T>;
template <class T>
using iter_element_t = typename iter_element<T>::type;
namespace detail {
// Default value_type of an iterator
template <class T, class = void>
struct iter_v { using type = remove_cv_t<typename iter_e<T>::type>; };
// Prefer to propagate ::value_type
template <class T>
struct iter_v<T,void_t<typename T::value_type>> { using type = typename T::value_type; };
} // end namespace detail
template <class T>
using iter_value = detail::iter_v<T>;
template <class T>
using iter_value_t = typename iter_value<T>::type;
template <class Iterator>
struct iterator_traits {
using reference = iter_reference_t<Iterator>;
using element_type = iter_element_t<Iterator>;
using value_type = iter_value_t<Iterator>;
};
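// Usage sketch (illustrative): for raw pointers these traits resolve to the expected types,
//
//   static_assert(cute::is_same_v<cute::iter_reference_t<float*>, float&>, "");
//   static_assert(cute::is_same_v<cute::iter_value_t<float const*>, float>, "");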
//
// has_dereference to determine whether a type can be dereferenced, i.e. models an iterator
//
namespace detail {
template <class T, class = void>
struct has_dereference : CUTE_STL_NAMESPACE::false_type {};
template <class T>
struct has_dereference<T, void_t<decltype(*declval<T&>())>> : CUTE_STL_NAMESPACE::true_type {};
} // end namespace detail
template <class T>
using has_dereference = detail::has_dereference<T>;
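// For example (illustrative):
//   static_assert(cute::has_dereference<int*>::value && !cute::has_dereference<int>::value, "");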
//
// raw_pointer_cast
//
template <class T>
CUTE_HOST_DEVICE constexpr
T*
raw_pointer_cast(T* ptr) {
return ptr;
}
//
// A very simplified iterator adaptor.
// Derived classes may override methods, but be careful to reproduce interfaces exactly.
// Clients should never have an instance of this class. Do not write methods that take this as a param.
//
template <class Iterator, class DerivedType>
struct iter_adaptor
{
using iterator = Iterator;
using reference = typename iterator_traits<iterator>::reference;
using element_type = typename iterator_traits<iterator>::element_type;
using value_type = typename iterator_traits<iterator>::value_type;
iterator ptr_;
CUTE_HOST_DEVICE constexpr
iter_adaptor(iterator ptr = {}) : ptr_(ptr) {}
CUTE_HOST_DEVICE constexpr
reference operator*() const { return *ptr_; }
template <class Index>
CUTE_HOST_DEVICE constexpr
reference operator[](Index const& i) const { return ptr_[i]; }
template <class Index>
CUTE_HOST_DEVICE constexpr
DerivedType operator+(Index const& i) const { return {ptr_ + i}; }
CUTE_HOST_DEVICE constexpr
iterator get() const { return ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator==(DerivedType const& x, DerivedType const& y) { return x.ptr_ == y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator!=(DerivedType const& x, DerivedType const& y) { return x.ptr_ != y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator< (DerivedType const& x, DerivedType const& y) { return x.ptr_ < y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator<=(DerivedType const& x, DerivedType const& y) { return x.ptr_ <= y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator> (DerivedType const& x, DerivedType const& y) { return x.ptr_ > y.ptr_; }
CUTE_HOST_DEVICE constexpr
friend bool operator>=(DerivedType const& x, DerivedType const& y) { return x.ptr_ >= y.ptr_; }
};
template <class I, class D>
CUTE_HOST_DEVICE constexpr
auto
raw_pointer_cast(iter_adaptor<I,D> const& x) {
return raw_pointer_cast(x.ptr_);
}
//
// counting iterator -- quick and dirty
//
template <class T = int>
struct counting_iterator
{
using index_type = T;
using value_type = T;
using reference = T;
index_type n_;
CUTE_HOST_DEVICE constexpr
counting_iterator(index_type n = 0) : n_(n) {}
CUTE_HOST_DEVICE constexpr
index_type operator*() const { return n_; }
CUTE_HOST_DEVICE constexpr
index_type operator[](index_type i) const { return n_ + i; }
CUTE_HOST_DEVICE constexpr
counting_iterator operator+(index_type i) const { return {n_ + i}; }
CUTE_HOST_DEVICE constexpr
counting_iterator& operator++() { ++n_; return *this; }
CUTE_HOST_DEVICE constexpr
counting_iterator operator++(int) { counting_iterator ret = *this; ++n_; return ret; }
CUTE_HOST_DEVICE constexpr
friend bool operator==(counting_iterator const& x, counting_iterator const& y) { return x.n_ == y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator!=(counting_iterator const& x, counting_iterator const& y) { return x.n_ != y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator< (counting_iterator const& x, counting_iterator const& y) { return x.n_ < y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator<=(counting_iterator const& x, counting_iterator const& y) { return x.n_ <= y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator> (counting_iterator const& x, counting_iterator const& y) { return x.n_ > y.n_; }
CUTE_HOST_DEVICE constexpr
friend bool operator>=(counting_iterator const& x, counting_iterator const& y) { return x.n_ >= y.n_; }
};
template <class T>
CUTE_HOST_DEVICE constexpr
T
raw_pointer_cast(counting_iterator<T> const& x) {
return x.n_;
}
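// Usage sketch (illustrative): counting_iterator acts like a pointer into the integers,
//
//   cute::counting_iterator<int> it(5);
//   int a = *it;                            // 5
//   int b = it[3];                          // 8
//   auto jt = it + 2;                       // counting_iterator(7)
//   int n = cute::raw_pointer_cast(jt);     // 7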
//
// Display utilities
//
template <class T>
CUTE_HOST_DEVICE void print(T const* const ptr)
{
printf("ptr["); print(sizeof_bits<T>::value); printf("b](%p)", ptr);
}
template <class T>
CUTE_HOST_DEVICE void print(counting_iterator<T> ptr)
{
printf("counting_iter("); print(ptr.n_); printf(")");
}
#if !defined(__CUDACC_RTC__)
template <class T>
CUTE_HOST std::ostream& operator<<(std::ostream& os, counting_iterator<T> ptr)
{
return os << "counting_iter(" << ptr.n_ << ")";
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace cute
| include/cute/pointer_base.hpp/0 | {
"file_path": "include/cute/pointer_base.hpp",
"repo_id": "include",
"token_count": 2732
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store loaded data
typename AccessType,
  /// Number of bytes to load
int LoadBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always
>
struct global_load;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \
(__CUDACC_VER_MAJOR__ > 11)) && \
defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)
#define CUTLASS_ENABLE_L2_PREFETCH 1
#else
#define CUTLASS_ENABLE_L2_PREFETCH 0
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
// The redundant mov PTX instructions are used to force the compiler to
// keep the initializing code before the ld.global
template <typename AccessType>
struct global_load<AccessType,
32,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 *data = reinterpret_cast<uint4 *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %9, 0;\n"
" mov.b32 %0, %10;\n"
" mov.b32 %1, %11;\n"
" mov.b32 %2, %12;\n"
" mov.b32 %3, %13;\n"
" mov.b32 %4, %14;\n"
" mov.b32 %5, %15;\n"
" mov.b32 %6, %16;\n"
" mov.b32 %7, %17;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.L2::128B.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#endif
"}\n"
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w),
"=r"(data[1].x), "=r"(data[1].y), "=r"(data[1].z), "=r"(data[1].w)
: "l"(ptr), "r"((int)pred_guard), "r"(data[0].x), "r"(data[0].y),
"r"(data[0].z), "r"(data[0].w), "r"(data[1].x), "r"(data[1].y),
"r"(data[1].z), "r"(data[1].w), "l"(((uint8_t *)ptr) + 16));
}
};
template <typename AccessType>
struct global_load<AccessType,
32,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 *data = reinterpret_cast<uint4 *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %9, 0;\n"
" mov.b32 %0, %10;\n"
" mov.b32 %1, %11;\n"
" mov.b32 %2, %12;\n"
" mov.b32 %3, %13;\n"
" mov.b32 %4, %14;\n"
" mov.b32 %5, %15;\n"
" mov.b32 %6, %16;\n"
" mov.b32 %7, %17;\n"
" @p ld.global.lu.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.lu.v4.u32 {%4, %5, %6, %7}, [%18];\n"
"}\n"
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w),
"=r"(data[1].x), "=r"(data[1].y), "=r"(data[1].z), "=r"(data[1].w)
: "l"(ptr), "r"((int)pred_guard), "r"(data[0].x), "r"(data[0].y),
"r"(data[0].z), "r"(data[0].w), "r"(data[1].x), "r"(data[1].y),
"r"(data[1].z), "r"(data[1].w), "l"(((uint8_t *)ptr) + 16));
}
};
template <typename AccessType>
struct global_load<AccessType,
16,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 &data = reinterpret_cast<uint4 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" mov.b32 %0, %6;\n"
" mov.b32 %1, %7;\n"
" mov.b32 %2, %8;\n"
" mov.b32 %3, %9;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y), "=r"(data.z), "=r"(data.w)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w));
}
};
template <typename AccessType>
struct global_load<AccessType,
16,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 &data = reinterpret_cast<uint4 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" mov.b32 %0, %6;\n"
" mov.b32 %1, %7;\n"
" mov.b32 %2, %8;\n"
" mov.b32 %3, %9;\n"
" @p ld.global.lu.v4.u32 {%0, %1, %2, %3}, [%4];\n"
"}\n"
: "=r"(data.x), "=r"(data.y), "=r"(data.z), "=r"(data.w)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w));
}
};
template <typename AccessType>
struct global_load<AccessType,
8,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint2 &data = reinterpret_cast<uint2 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" mov.b32 %0, %4;\n"
" mov.b32 %1, %5;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v2.u32 {%0, %1}, [%2];\n"
#else
" @p ld.global.v2.u32 {%0, %1}, [%2];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y));
}
};
template <typename AccessType>
struct global_load<AccessType,
8,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint2 &data = reinterpret_cast<uint2 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" mov.b32 %0, %4;\n"
" mov.b32 %1, %5;\n"
" @p ld.global.lu.v2.u32 {%0, %1}, [%2];\n"
"}\n"
: "=r"(data.x), "=r"(data.y)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y));
}
};
template <typename AccessType>
struct global_load<AccessType,
4,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
unsigned &data = reinterpret_cast<unsigned &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b32 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u32 %0, [%1];\n"
#else
" @p ld.global.u32 %0, [%1];\n"
#endif
"}\n"
: "=r"(data)
: "l"(ptr), "r"((int)pred_guard), "r"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
4,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
unsigned &data = reinterpret_cast<unsigned &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b32 %0, %3;\n"
" @p ld.global.lu.u32 %0, [%1];\n"
"}\n"
: "=r"(data)
: "l"(ptr), "r"((int)pred_guard), "r"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
2,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint16_t &data = reinterpret_cast<uint16_t &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b16 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u16 %0, [%1];\n"
#else
" @p ld.global.u16 %0, [%1];\n"
#endif
"}\n"
: "=h"(data)
: "l"(ptr), "r"((int)pred_guard), "h"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
2,
CacheOperation::LastUse
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint16_t &data = reinterpret_cast<uint16_t &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b16 %0, %3;\n"
" @p ld.global.lu.u16 %0, [%1];\n"
"}\n"
: "=h"(data)
: "l"(ptr), "r"((int)pred_guard), "h"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
1,
CacheOperation::Always
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
if (pred_guard) D = *(reinterpret_cast<AccessType const *>(ptr));
}
};
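// Usage sketch (illustrative, device code): load a 16B fragment with an optional predicate.
// Fragment is assumed to be a 16B-aligned type such as cutlass::AlignedArray<float, 4> from
// cutlass/array.h; gmem_ptr and guard are assumed to be a valid global pointer and a bool.
//
//   using Fragment = cutlass::AlignedArray<float, 4>;   // 16 bytes
//   Fragment frag;
//   cutlass::arch::global_load<Fragment, sizeof(Fragment)>(frag, gmem_ptr, guard);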
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store data
typename AccessType,
  /// Number of bytes to store
int StoreBytes
>
struct global_store;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct global_store<AccessType, 64> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
" @p st.global.v4.u32 [%11], {%12, %13, %14, %15};\n"
" @p st.global.v4.u32 [%16], {%17, %18, %19, %20};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w),
"l"(((uint8_t *)ptr) + 32),
"r"(data[2].x), "r"(data[2].y), "r"(data[2].z), "r"(data[2].w),
"l"(((uint8_t *)ptr) + 48),
"r"(data[3].x), "r"(data[3].y), "r"(data[3].z), "r"(data[3].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 32> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 16> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const &data = reinterpret_cast<uint4 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 8> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint2 const &data = reinterpret_cast<uint2 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" @p st.global.v2.u32 [%0], {%1, %2};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 4> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint32_t const &data = reinterpret_cast<uint32_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u32 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "r"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 2> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint16_t const &data = reinterpret_cast<uint16_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u16 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "h"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 1> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
if (pred_guard) *(reinterpret_cast<AccessType *>(ptr)) = D;
}
};
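// Usage sketch (illustrative, device code): the store path mirrors global_load above;
// constructing the functor performs the (predicated) store of the fragment, with Fragment,
// gmem_ptr, and guard assumed as in the global_load sketch.
//
//   cutlass::arch::global_store<Fragment, sizeof(Fragment)>(frag, gmem_ptr, guard);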
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ld.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_load(void *dst, uint32_t ptr);
/// ld.shared - 16b
template <>
CUTLASS_DEVICE
void shared_load<2>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u16 %0, [%1];\n"
: "=h"(*reinterpret_cast<uint16_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 32b
template <>
CUTLASS_DEVICE
void shared_load<4>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u32 %0, [%1];\n"
: "=r"(*reinterpret_cast<uint32_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 64b
template <>
CUTLASS_DEVICE
void shared_load<8>(void *dst, uint32_t ptr) {
uint2 *dst_u64 = reinterpret_cast<uint2 *>(dst);
asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
:
"=r"(dst_u64->x),
"=r"(dst_u64->y)
: "r"(ptr));
}
/// ld.shared - 128b
template <>
CUTLASS_DEVICE
void shared_load<16>(void *dst, uint32_t ptr) {
uint4 *dst_u128 = reinterpret_cast<uint4 *>(dst);
asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
:
"=r"(dst_u128->x),
"=r"(dst_u128->y),
"=r"(dst_u128->z),
"=r"(dst_u128->w)
: "r"(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// st.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_store(uint32_t ptr, void const *src);
/// st.shared - 16b
template <>
CUTLASS_DEVICE
void shared_store<2>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u16 [%0], %1;\n"
: :
"r"(ptr),
"h"(*reinterpret_cast<uint16_t const *>(src))
);
}
/// st.shared - 32b
template <>
CUTLASS_DEVICE
void shared_store<4>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(ptr),
"r"(*reinterpret_cast<uint32_t const *>(src))
);
}
/// st.shared - 64b
template <>
CUTLASS_DEVICE
void shared_store<8>(uint32_t ptr, void const *src) {
  uint2 const *src_u64 = reinterpret_cast<uint2 const *>(src);
  asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
               : :
               "r"(ptr),
               "r"(src_u64->x),
               "r"(src_u64->y)
               );
}
/// st.shared - 128b
template <>
CUTLASS_DEVICE
void shared_store<16>(uint32_t ptr, void const *src) {
  uint4 const *src_u128 = reinterpret_cast<uint4 const *>(src);
  asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
               : :
               "r"(ptr),
               "r"(src_u128->x),
               "r"(src_u128->y),
               "r"(src_u128->z),
               "r"(src_u128->w)
               );
}
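// Usage sketch (illustrative, device code): move 16 bytes between shared memory and registers.
// smem_addr is assumed to be a 32-bit shared-memory address (e.g. produced by a
// cvta.to.shared conversion of a __shared__ pointer).
//
//   uint4 regs;
//   cutlass::arch::shared_load<16>(&regs, smem_addr);    // shared memory -> registers
//   cutlass::arch::shared_store<16>(smem_addr, &regs);   // registers -> shared memory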
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/memory_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/memory.h/0 | {
"file_path": "include/cutlass/arch/memory.h",
"repo_id": "include",
"token_count": 8863
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Boost-style constant definitions for floating-point types.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
///////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace constants {
///////////////////////////////////////////////////////////////////////////////////
//
// Primary templates
//
/// Returns 1, the multiplicative identity element
template <typename T> CUTLASS_HOST_DEVICE T one();
/// Returns 0, the additive identity element
template <typename T> CUTLASS_HOST_DEVICE T zero();
/// Returns 2
template <typename T> CUTLASS_HOST_DEVICE T two();
/// Returns pi, approximately 3.141
template <typename T> CUTLASS_HOST_DEVICE T pi();
/// Returns 2 * pi
template <typename T> CUTLASS_HOST_DEVICE T two_pi();
/// Returns pi / 2
template <typename T> CUTLASS_HOST_DEVICE T half_pi();
/// Returns sqrt(pi)
template <typename T> CUTLASS_HOST_DEVICE T root_pi();
/// Returns sqrt(pi / 2)
template <typename T> CUTLASS_HOST_DEVICE T root_half_pi();
/// Returns sqrt(2 * pi)
template <typename T> CUTLASS_HOST_DEVICE T root_two_pi();
/// Returns sqrt(ln(4))
template <typename T> CUTLASS_HOST_DEVICE T root_ln_four();
/// Returns e, approximately 2.718...
template <typename T> CUTLASS_HOST_DEVICE T e();
/// Returns (1/2)
template <typename T> CUTLASS_HOST_DEVICE T half();
/// Returns sqrt(2), approximately 1.414...
template <typename T> CUTLASS_HOST_DEVICE T root_two();
/// Returns sqrt(2)/2, approximately 0.707...
template <typename T> CUTLASS_HOST_DEVICE T half_root_two();
/// Returns ln(2), approximately 0.693...
template <typename T> CUTLASS_HOST_DEVICE T ln_two();
/// Returns ln(ln(2)), approximately -0.3665...
template <typename T> CUTLASS_HOST_DEVICE T ln_ln_two();
/// Returns 1/3, approximately 0.333...
template <typename T> CUTLASS_HOST_DEVICE T third();
/// Returns 2/3, approximately 0.666...
template <typename T> CUTLASS_HOST_DEVICE T twothirds();
/// Returns pi - 3, approximately 0.1416...
template <typename T> CUTLASS_HOST_DEVICE T pi_minus_three();
/// Returns 4 - pi, approximately 0.858...
template <typename T> CUTLASS_HOST_DEVICE T four_minus_pi();
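// Usage sketch (illustrative): constants are requested by result type, and each
// specialization below returns the bit pattern rounded for that type,
//
//   float pi_f = cutlass::constants::pi<float>();
//   cutlass::half_t two_h = cutlass::constants::two<cutlass::half_t>();
//   auto one_c = cutlass::constants::one<cutlass::complex<float>>();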
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for double
/// Returns 1, the multiplicative identity element (specialization for double)
template <> CUTLASS_HOST_DEVICE double one<double>() {
uint64_t bits = 0x3ff0000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> one< complex<double> >() {
return complex<double>(one<double>(), double());
}
/// Returns 0, the additive identity element (specialization for double)
template <> CUTLASS_HOST_DEVICE double zero<double>() {
uint64_t bits = 0x0ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> zero< complex<double> >() {
return complex<double>(zero<double>(), double());
}
/// Returns 2 (specialization for double)
template <> CUTLASS_HOST_DEVICE double two<double>() {
uint64_t bits = 0x4000000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> two< complex<double> >() {
return complex<double>(two<double>(), double());
}
/// Returns pi, approximately 3.141 (specialization for double)
template <> CUTLASS_HOST_DEVICE double pi<double>() {
uint64_t bits = 0x400921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> pi< complex<double> >() {
return complex<double>(pi<double>(), double());
}
/// Returns 2 * pi (specialization for double)
template <> CUTLASS_HOST_DEVICE double two_pi<double>() {
uint64_t bits = 0x401921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2 * pi (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> two_pi< complex<double> >() {
return complex<double>(two_pi<double>(), double());
}
/// Returns pi / 2 (specialization for double)
template <> CUTLASS_HOST_DEVICE double half_pi<double>() {
uint64_t bits = 0x3ff921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi / 2 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half_pi< complex<double> >() {
return complex<double>(half_pi<double>(), double());
}
/// Returns sqrt(pi) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_pi<double>() {
uint64_t bits = 0x3ffc5bf891b4ef6aull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_pi< complex<double> >() {
return complex<double>(root_pi<double>(), double());
}
/// Returns sqrt(pi / 2) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_half_pi<double>() {
uint64_t bits = 0x3ff40d931ff62705ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_half_pi< complex<double> >() {
return complex<double>(root_half_pi<double>(), double());
}
/// Returns sqrt(2 * pi) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_two_pi<double>() {
uint64_t bits = 0x40040d931ff62705ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_two_pi< complex<double> >() {
return complex<double>(root_two_pi<double>(), double());
}
/// Returns sqrt(ln(4)) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_ln_four<double>() {
uint64_t bits = 0x3ff2d6abe44afc43ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_ln_four< complex<double> >() {
return complex<double>(root_ln_four<double>(), double());
}
/// Returns e, approximately 2.718... (specialization for double)
template <> CUTLASS_HOST_DEVICE double e<double>() {
uint64_t bits = 0x4005bf0a8b145769ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> e< complex<double> >() {
return complex<double>(e<double>(), double());
}
/// Returns (1/2) (specialization for double)
template <> CUTLASS_HOST_DEVICE double half<double>() {
uint64_t bits = 0x3fe0000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns (1/2) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half< complex<double> >() {
return complex<double>(half<double>(), double());
}
/// Returns sqrt(2), approximately 1.414... (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_two<double>() {
uint64_t bits = 0x3ff6a09e667f3bcdull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_two< complex<double> >() {
return complex<double>(root_two<double>(), double());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for double)
template <> CUTLASS_HOST_DEVICE double half_root_two<double>() {
uint64_t bits = 0x3fe6a09e667f3bcdull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half_root_two< complex<double> >() {
return complex<double>(half_root_two<double>(), double());
}
/// Returns ln(2), approximately 0.693... (specialization for double)
template <> CUTLASS_HOST_DEVICE double ln_two<double>() {
uint64_t bits = 0x3fe62e42fefa39efull;
return reinterpret_cast<double const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> ln_two< complex<double> >() {
return complex<double>(ln_two<double>(), double());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for double)
template <> CUTLASS_HOST_DEVICE double ln_ln_two<double>() {
uint64_t bits = 0xbfd774f29bdd6b9full;
return reinterpret_cast<double const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> ln_ln_two< complex<double> >() {
return complex<double>(ln_ln_two<double>(), double());
}
/// Returns 1/3, approximately 0.333... (specialization for double)
template <> CUTLASS_HOST_DEVICE double third<double>() {
uint64_t bits = 0x3fd5555555555555ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> third< complex<double> >() {
return complex<double>(third<double>(), double());
}
/// Returns 2/3, approximately 0.666... (specialization for double)
template <> CUTLASS_HOST_DEVICE double twothirds<double>() {
uint64_t bits = 0x3fe5555555555555ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> twothirds< complex<double> >() {
return complex<double>(twothirds<double>(), double());
}
/// Returns pi - 3, approximately 0.1416... (specialization for double)
template <> CUTLASS_HOST_DEVICE double pi_minus_three<double>() {
uint64_t bits = 0x3fc21fb54442d180ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> pi_minus_three< complex<double> >() {
return complex<double>(pi_minus_three<double>(), double());
}
/// Returns 4 - pi, approximately 0.858... (specialization for double)
template <> CUTLASS_HOST_DEVICE double four_minus_pi<double>() {
uint64_t bits = 0x3feb7812aeef4ba0ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> four_minus_pi< complex<double> >() {
return complex<double>(four_minus_pi<double>(), double());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for float
/// Returns 1, the multiplicative identity element (specialization for float)
template <> CUTLASS_HOST_DEVICE float one<float>() {
uint32_t bits = 0x3f800000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> one< complex<float> >() {
return complex<float>(one<float>(), float());
}
/// Returns 0, the additive identity element (specialization for float)
template <> CUTLASS_HOST_DEVICE float zero<float>() {
uint32_t bits = 0x0u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> zero< complex<float> >() {
return complex<float>(zero<float>(), float());
}
/// Returns 2 (specialization for float)
template <> CUTLASS_HOST_DEVICE float two<float>() {
uint32_t bits = 0x40000000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> two< complex<float> >() {
return complex<float>(two<float>(), float());
}
/// Returns pi, approximately 3.141 (specialization for float)
template <> CUTLASS_HOST_DEVICE float pi<float>() {
uint32_t bits = 0x40490fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> pi< complex<float> >() {
return complex<float>(pi<float>(), float());
}
/// Returns 2 * pi (specialization for float)
template <> CUTLASS_HOST_DEVICE float two_pi<float>() {
uint32_t bits = 0x40c90fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2 * pi (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> two_pi< complex<float> >() {
return complex<float>(two_pi<float>(), float());
}
/// Returns pi / 2 (specialization for float)
template <> CUTLASS_HOST_DEVICE float half_pi<float>() {
uint32_t bits = 0x3fc90fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi / 2 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half_pi< complex<float> >() {
return complex<float>(half_pi<float>(), float());
}
/// Returns sqrt(pi) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_pi<float>() {
uint32_t bits = 0x3fe2dfc5u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_pi< complex<float> >() {
return complex<float>(root_pi<float>(), float());
}
/// Returns sqrt(pi / 2) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_half_pi<float>() {
uint32_t bits = 0x3fa06c99u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_half_pi< complex<float> >() {
return complex<float>(root_half_pi<float>(), float());
}
/// Returns sqrt(2 * pi) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_two_pi<float>() {
uint32_t bits = 0x40206c99u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_two_pi< complex<float> >() {
return complex<float>(root_two_pi<float>(), float());
}
/// Returns sqrt(ln(4)) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_ln_four<float>() {
uint32_t bits = 0x3f96b55fu;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_ln_four< complex<float> >() {
return complex<float>(root_ln_four<float>(), float());
}
/// Returns e, approximately 2.718... (specialization for float)
template <> CUTLASS_HOST_DEVICE float e<float>() {
uint32_t bits = 0x402df854u;
return reinterpret_cast<float const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> e< complex<float> >() {
return complex<float>(e<float>(), float());
}
/// Returns (1/2) (specialization for float)
template <> CUTLASS_HOST_DEVICE float half<float>() {
uint32_t bits = 0x3f000000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns (1/2) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half< complex<float> >() {
return complex<float>(half<float>(), float());
}
/// Returns sqrt(2), approximately 1.414... (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_two<float>() {
uint32_t bits = 0x3fb504f3u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_two< complex<float> >() {
return complex<float>(root_two<float>(), float());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for float)
template <> CUTLASS_HOST_DEVICE float half_root_two<float>() {
uint32_t bits = 0x3f3504f3u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half_root_two< complex<float> >() {
return complex<float>(half_root_two<float>(), float());
}
/// Returns ln(2), approximately 0.693... (specialization for float)
template <> CUTLASS_HOST_DEVICE float ln_two<float>() {
uint32_t bits = 0x3f317218u;
return reinterpret_cast<float const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> ln_two< complex<float> >() {
return complex<float>(ln_two<float>(), float());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for float)
template <> CUTLASS_HOST_DEVICE float ln_ln_two<float>() {
uint32_t bits = 0xbebba795u;
return reinterpret_cast<float const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> ln_ln_two< complex<float> >() {
return complex<float>(ln_ln_two<float>(), float());
}
/// Returns 1/3, approximately 0.333... (specialization for float)
template <> CUTLASS_HOST_DEVICE float third<float>() {
uint32_t bits = 0x3eaaaaabu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> third< complex<float> >() {
return complex<float>(third<float>(), float());
}
/// Returns 2/3, approximately 0.666... (specialization for float)
template <> CUTLASS_HOST_DEVICE float twothirds<float>() {
uint32_t bits = 0x3f2aaaabu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> twothirds< complex<float> >() {
return complex<float>(twothirds<float>(), float());
}
/// Returns pi - 3, approximately 0.1416... (specialization for float)
template <> CUTLASS_HOST_DEVICE float pi_minus_three<float>() {
uint32_t bits = 0x3e10fdaau;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> pi_minus_three< complex<float> >() {
return complex<float>(pi_minus_three<float>(), float());
}
/// Returns 4 - pi, approximately 0.858... (specialization for float)
template <> CUTLASS_HOST_DEVICE float four_minus_pi<float>() {
uint32_t bits = 0x3f5bc095u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> four_minus_pi< complex<float> >() {
return complex<float>(four_minus_pi<float>(), float());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for tfloat32_t
/// Returns 1, the multiplicative identity element (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t one<tfloat32_t>() {
uint32_t bits = 0x3f801000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> one< complex<tfloat32_t> >() {
return complex<tfloat32_t>(one<tfloat32_t>(), tfloat32_t());
}
/// Returns 0, the additive identity element (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t zero<tfloat32_t>() {
uint32_t bits = 0x1000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> zero< complex<tfloat32_t> >() {
return complex<tfloat32_t>(zero<tfloat32_t>(), tfloat32_t());
}
/// Returns 2 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t two<tfloat32_t>() {
uint32_t bits = 0x40001000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(two<tfloat32_t>(), tfloat32_t());
}
/// Returns pi, approximately 3.141 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t pi<tfloat32_t>() {
uint32_t bits = 0x40491fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(pi<tfloat32_t>(), tfloat32_t());
}
/// Returns 2 * pi (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t two_pi<tfloat32_t>() {
uint32_t bits = 0x40c91fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> two_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(two_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns pi / 2 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half_pi<tfloat32_t>() {
uint32_t bits = 0x3fc91fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(pi) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_pi<tfloat32_t>() {
uint32_t bits = 0x3fe2efc5u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(pi / 2) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_half_pi<tfloat32_t>() {
uint32_t bits = 0x3fa07c99u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_half_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_half_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2 * pi) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_two_pi<tfloat32_t>() {
uint32_t bits = 0x40207c99u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_two_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_two_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(ln(4)) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_ln_four<tfloat32_t>() {
uint32_t bits = 0x3f96c55fu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_ln_four< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_ln_four<tfloat32_t>(), tfloat32_t());
}
/// Returns e, approximately 2.718... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t e<tfloat32_t>() {
uint32_t bits = 0x402e0854u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> e< complex<tfloat32_t> >() {
return complex<tfloat32_t>(e<tfloat32_t>(), tfloat32_t());
}
/// Returns (1/2) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half<tfloat32_t>() {
uint32_t bits = 0x3f001000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_two<tfloat32_t>() {
uint32_t bits = 0x3fb514f3u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_two<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half_root_two<tfloat32_t>() {
uint32_t bits = 0x3f3514f3u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half_root_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half_root_two<tfloat32_t>(), tfloat32_t());
}
/// Returns ln(2), approximately 0.693... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t ln_two<tfloat32_t>() {
uint32_t bits = 0x3f318218u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> ln_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(ln_two<tfloat32_t>(), tfloat32_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t ln_ln_two<tfloat32_t>() {
uint32_t bits = 0xbebbb795u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> ln_ln_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(ln_ln_two<tfloat32_t>(), tfloat32_t());
}
/// Returns 1/3, approximately 0.333... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t third<tfloat32_t>() {
uint32_t bits = 0x3eaabaabu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> third< complex<tfloat32_t> >() {
return complex<tfloat32_t>(third<tfloat32_t>(), tfloat32_t());
}
/// Returns 2/3, approximately 0.666... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t twothirds<tfloat32_t>() {
uint32_t bits = 0x3f2abaabu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> twothirds< complex<tfloat32_t> >() {
return complex<tfloat32_t>(twothirds<tfloat32_t>(), tfloat32_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t pi_minus_three<tfloat32_t>() {
uint32_t bits = 0x3e110daau;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> pi_minus_three< complex<tfloat32_t> >() {
return complex<tfloat32_t>(pi_minus_three<tfloat32_t>(), tfloat32_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t four_minus_pi<tfloat32_t>() {
uint32_t bits = 0x3f5bd095u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> four_minus_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(four_minus_pi<tfloat32_t>(), tfloat32_t());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for half_t
/// Returns 1, the multiplicative identity element (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t one<half_t>() {
uint16_t bits = 0x3c00u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> one< complex<half_t> >() {
return complex<half_t>(one<half_t>(), half_t());
}
/// Returns 0, the additive identity element (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t zero<half_t>() {
uint16_t bits = 0x0u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> zero< complex<half_t> >() {
return complex<half_t>(zero<half_t>(), half_t());
}
/// Returns 2 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t two<half_t>() {
uint16_t bits = 0x4000u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> two< complex<half_t> >() {
return complex<half_t>(two<half_t>(), half_t());
}
/// Returns pi, approximately 3.141 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t pi<half_t>() {
uint16_t bits = 0x4248u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> pi< complex<half_t> >() {
return complex<half_t>(pi<half_t>(), half_t());
}
/// Returns 2 * pi (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t two_pi<half_t>() {
uint16_t bits = 0x4648u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> two_pi< complex<half_t> >() {
return complex<half_t>(two_pi<half_t>(), half_t());
}
/// Returns pi / 2 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half_pi<half_t>() {
uint16_t bits = 0x3e48u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half_pi< complex<half_t> >() {
return complex<half_t>(half_pi<half_t>(), half_t());
}
/// Returns sqrt(pi) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_pi<half_t>() {
uint16_t bits = 0x3f17u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_pi< complex<half_t> >() {
return complex<half_t>(root_pi<half_t>(), half_t());
}
/// Returns sqrt(pi / 2) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_half_pi<half_t>() {
uint16_t bits = 0x3d03u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_half_pi< complex<half_t> >() {
return complex<half_t>(root_half_pi<half_t>(), half_t());
}
/// Returns sqrt(2 * pi) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_two_pi<half_t>() {
uint16_t bits = 0x4103u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_two_pi< complex<half_t> >() {
return complex<half_t>(root_two_pi<half_t>(), half_t());
}
/// Returns sqrt(ln(4)) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_ln_four<half_t>() {
uint16_t bits = 0x3cb6u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_ln_four< complex<half_t> >() {
return complex<half_t>(root_ln_four<half_t>(), half_t());
}
/// Returns e, approximately 2.718... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t e<half_t>() {
uint16_t bits = 0x4170u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> e< complex<half_t> >() {
return complex<half_t>(e<half_t>(), half_t());
}
/// Returns (1/2) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half<half_t>() {
uint16_t bits = 0x3800u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half< complex<half_t> >() {
return complex<half_t>(half<half_t>(), half_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_two<half_t>() {
uint16_t bits = 0x3da8u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_two< complex<half_t> >() {
return complex<half_t>(root_two<half_t>(), half_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half_root_two<half_t>() {
uint16_t bits = 0x39a8u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half_root_two< complex<half_t> >() {
return complex<half_t>(half_root_two<half_t>(), half_t());
}
/// Returns ln(2), approximately 0.693... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t ln_two<half_t>() {
uint16_t bits = 0x398cu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> ln_two< complex<half_t> >() {
return complex<half_t>(ln_two<half_t>(), half_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t ln_ln_two<half_t>() {
uint16_t bits = 0xb5ddu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> ln_ln_two< complex<half_t> >() {
return complex<half_t>(ln_ln_two<half_t>(), half_t());
}
/// Returns 1/3, approximately 0.333... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t third<half_t>() {
uint16_t bits = 0x3555u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> third< complex<half_t> >() {
return complex<half_t>(third<half_t>(), half_t());
}
/// Returns 2/3, approximately 0.666... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t twothirds<half_t>() {
uint16_t bits = 0x3955u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> twothirds< complex<half_t> >() {
return complex<half_t>(twothirds<half_t>(), half_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t pi_minus_three<half_t>() {
uint16_t bits = 0x3088u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> pi_minus_three< complex<half_t> >() {
return complex<half_t>(pi_minus_three<half_t>(), half_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t four_minus_pi<half_t>() {
uint16_t bits = 0x3adeu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> four_minus_pi< complex<half_t> >() {
return complex<half_t>(four_minus_pi<half_t>(), half_t());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for bfloat16_t
/// Returns 1, the multiplicative identity element (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t one<bfloat16_t>() {
uint16_t bits = 0x3f80u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> one< complex<bfloat16_t> >() {
return complex<bfloat16_t>(one<bfloat16_t>(), bfloat16_t());
}
/// Returns 0, the additive identity element (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t zero<bfloat16_t>() {
uint16_t bits = 0x0u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> zero< complex<bfloat16_t> >() {
return complex<bfloat16_t>(zero<bfloat16_t>(), bfloat16_t());
}
/// Returns 2 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t two<bfloat16_t>() {
uint16_t bits = 0x4000u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(two<bfloat16_t>(), bfloat16_t());
}
/// Returns pi, approximately 3.141 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t pi<bfloat16_t>() {
uint16_t bits = 0x4049u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(pi<bfloat16_t>(), bfloat16_t());
}
/// Returns 2 * pi (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t two_pi<bfloat16_t>() {
uint16_t bits = 0x40c9u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> two_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(two_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns pi / 2 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half_pi<bfloat16_t>() {
uint16_t bits = 0x3fc9u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(pi) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_pi<bfloat16_t>() {
uint16_t bits = 0x3fe3u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(pi / 2) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_half_pi<bfloat16_t>() {
uint16_t bits = 0x3fa0u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_half_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_half_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2 * pi) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_two_pi<bfloat16_t>() {
uint16_t bits = 0x4020u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_two_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_two_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(ln(4)) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_ln_four<bfloat16_t>() {
uint16_t bits = 0x3f97u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_ln_four< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_ln_four<bfloat16_t>(), bfloat16_t());
}
/// Returns e, approximately 2.718... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t e<bfloat16_t>() {
uint16_t bits = 0x402eu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> e< complex<bfloat16_t> >() {
return complex<bfloat16_t>(e<bfloat16_t>(), bfloat16_t());
}
/// Returns (1/2) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half<bfloat16_t>() {
uint16_t bits = 0x3f00u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_two<bfloat16_t>() {
uint16_t bits = 0x3fb5u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_two<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half_root_two<bfloat16_t>() {
uint16_t bits = 0x3f35u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half_root_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half_root_two<bfloat16_t>(), bfloat16_t());
}
/// Returns ln(2), approximately 0.693... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t ln_two<bfloat16_t>() {
uint16_t bits = 0x3f31u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> ln_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(ln_two<bfloat16_t>(), bfloat16_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t ln_ln_two<bfloat16_t>() {
uint16_t bits = 0xbebcu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> ln_ln_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(ln_ln_two<bfloat16_t>(), bfloat16_t());
}
/// Returns 1/3, approximately 0.333... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t third<bfloat16_t>() {
uint16_t bits = 0x3eabu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> third< complex<bfloat16_t> >() {
return complex<bfloat16_t>(third<bfloat16_t>(), bfloat16_t());
}
/// Returns 2/3, approximately 0.666... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t twothirds<bfloat16_t>() {
uint16_t bits = 0x3f2bu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> twothirds< complex<bfloat16_t> >() {
return complex<bfloat16_t>(twothirds<bfloat16_t>(), bfloat16_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t pi_minus_three<bfloat16_t>() {
uint16_t bits = 0x3e11u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> pi_minus_three< complex<bfloat16_t> >() {
return complex<bfloat16_t>(pi_minus_three<bfloat16_t>(), bfloat16_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t four_minus_pi<bfloat16_t>() {
uint16_t bits = 0x3f5cu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> four_minus_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(four_minus_pi<bfloat16_t>(), bfloat16_t());
}
///////////////////////////////////////////////////////////////////////////////////
} // namespace constants
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////
| include/cutlass/constants.h/0 | {
"file_path": "include/cutlass/constants.h",
"repo_id": "include",
"token_count": 17582
} | 24 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDeconv2d;
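// Illustrative instantiation sketch (not part of the original header; the tile shapes,
// epilogue functor, and swizzle below are example choices, not prescribed by this file).
// The kernel type is obtained through the nested ::Kernel typedef:
//
//   using Deconv2dKernel = typename cutlass::conv::kernel::DefaultDeconv2d<
//       float, cutlass::layout::TensorNHWC,        // ElementA / LayoutA (output gradient)
//       float, cutlass::layout::TensorNHWC,        // ElementB / LayoutB (filter)
//       float, cutlass::layout::TensorNHWC,        // ElementC / LayoutC
//       float,                                     // ElementAccumulator
//       cutlass::arch::OpClassSimt,
//       cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 8>,     // ThreadblockShape
//       cutlass::gemm::GemmShape<32, 64, 8>,       // WarpShape
//       cutlass::gemm::GemmShape<1, 1, 1>,         // InstructionShape (SIMT)
//       cutlass::epilogue::thread::LinearCombination<float, 1, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                         // Stages
//       cutlass::arch::OpMultiplyAdd
//   >::Kernel;
//
// IteratorAlgorithm, StrideSupport, and the alignments keep their defaults
// (kOptimized, kStrided, and 128-bit accesses, respectively).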
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kUnity
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
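// Note: the StrideSupport::kUnity specializations in this file build on the regular
// ImplicitGemmConvolution kernel and DefaultEpilogueSimt, whereas the kStrided
// specializations (such as the one above) use ImplicitGemmConvolutionStridedDgrad and
// DefaultEpilogueSimtStridedDgrad, which begin iteration at a given filter position
// (start_r, start_s) so that MMAs on invalid filter positions can be skipped.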
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kUnity
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
conv::GroupMode::kNone,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv2d specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
StrideSupport::kUnity
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv2d <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA,
conv::StrideSupport::kStrided
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIteratorStridedDgrad<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
cutlass::AlignedArray<ElementB, ThreadMapB::kElementsPerAccess>,
true /*IsDeconv*/
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv
>;
};
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_deconv2d.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_deconv2d.h",
"repo_id": "include",
"token_count": 9688
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorOptimized strided dgrad needs special handling
// to skip MMAs (Dx = Dy * w) on invalid filter positions
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = uint64_t;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
  // Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dStridedDgradOutputGradientIteratorOptimizedParams;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int64_t reset_bytes_s_;
int64_t reset_bytes_r_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0];
reset_bytes_r_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0] +
(problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1];
int offset_n[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
int filter_r = filter_r_;
int filter_s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
filter_r = (problem_size_.R - 1 - filter_r);
filter_s = (problem_size_.S - 1 - filter_s);
}
// Starting h, w positions for filter position in gemm_k=0
int start_h, start_w;
strided_dgrad_starting_coords(
problem_size_,
stride_h_divmod, stride_w_divmod,
filter_r, filter_s,
start_h, start_w);
// Effective starting P and Q for filter position required for remapping NHW rows
int P = (problem_size_.H - start_h + problem_size_.stride_h - 1) / problem_size_.stride_h;
int Q = (problem_size_.W - start_w + problem_size_.stride_w - 1) / problem_size_.stride_w;
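    // Illustrative example (not in the original source): with H = 14, stride_h = 2, and
    // start_h = 1, the participating rows are h = 1, 3, ..., 13, so the effective P
    // above evaluates to (14 - 1 + 2 - 1) / 2 = 7.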
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_npq = (threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided) % params_.tiled_rows_per_filter;
// (STEP 1) [reorder NHW rows to start with same filter positions]
offset_n[s] = offset_npq / (P * Q);
int residual = offset_npq % (P * Q);
int p = (residual / Q);
int q = (residual % Q);
int mapped_h = (start_h + p * problem_size_.stride_h);
int mapped_w = (start_w + q * problem_size_.stride_w);
// Access (p, q) coordinates for Dy tensor for filter position in gemm_k=0
// note that (h + pad_h - filter_r) and (w + pad_w - filter_s) are ensured to be
// divisible by stride_h and stride_w
offset_p[s] = (mapped_h + problem_size_.pad_h - filter_r) / problem_size_.stride_h;
offset_q[s] = (mapped_w + problem_size_.pad_w - filter_s) / problem_size_.stride_w;
// Initialize pointers for gemm_k=0
TensorCoord coord{offset_n[s], offset_p[s], offset_q[s], filter_k_};
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
//
// Precompute mask predicates
//
clear_mask();
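    // masks_[.][.][0] accumulates one predicate bit per filter row r (validity of the
    // mapped n and p coordinates), and masks_[.][.][1] accumulates one bit per filter
    // column s (validity of q); valid() later ANDs the two bits for the current (r, s).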
CUTLASS_PRAGMA_NO_UNROLL
for (int r = start_r; r < problem_size_.R; r += problem_size_.stride_h) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
        int p = offset_p[s_idx];
p += (params_.conv_sign * (r / problem_size_.stride_h));
bool pred = (offset_n[s_idx] < problem_size_.N && p >= 0 && p < problem_size_.P);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for(int s = start_s; s < problem_size_.S; s += problem_size_.stride_w) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int q = offset_q[s_idx];
q += (params_.conv_sign * (s / problem_size_.stride_w));
        bool pred = (q >= 0 && q < problem_size_.Q);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size.K);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn});
}
private:
  /// Adds a byte offset to each access pointer, optionally subtracting a reset distance
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset, LongIndex byte_reset = 0) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset - byte_reset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
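  /// Advances to the next filter position in gemm-k order: filter_s_ first (in steps of
  /// stride_w), then filter_r_ (in steps of stride_h); once both wrap around, the next
  /// filter_k_ tile is selected and the pointers are rewound accordingly.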
CUTLASS_DEVICE
void advance() {
int next_idx = 0;
int64_t reset_bytes = 0;
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ >= problem_size_.S) {
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
#if 0
if (filter_r_ < problem_size_.R) {
next_idx = 1;
// Restore bytes in q coordinate (Mma in filter s dimension)
reset_bytes = reset_bytes_s_;
} else {
// Restore filter_r
filter_r_ = start_r_;
next_idx = 2;
// Restore bytes in p and q coordinate (Mma in filter s and r dimension)
reset_bytes = reset_bytes_r_;
}
#else
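      // Branch-free equivalent of the disabled block above: one predicate
      // (filter_r_ < R) drives predicated selp instructions that pick the wrapped
      // filter_r_ (or start_r_), the increment index (1 vs. 2), and the pointer
      // reset distance (reset_bytes_s_ vs. reset_bytes_r_).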
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" setp.lt.s32 %%p, %3, %4;\n\t"
" selp.s32 %0, %3, %5, %%p;\n\t"
" selp.s32 %1, 1, 2, %%p;\n\t"
" selp.s64 %2, %6, %7, %%p;\n\t"
"}\n"
: "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes)
: "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_),
"l"(reset_bytes_s_), "l"(reset_bytes_r_));
#endif
}
// offset pointers by offset_bytes
add_byte_offset_(params_.inc_next[next_idx] - reset_bytes);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size_.K);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
// Limit on filter size
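    // (Each filter extent contributes one predicate bit per position to a 32-bit mask,
    // hence the R <= 32 and S <= 32 restriction.)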
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// The Conv2dDgradOutputGradientTileAccessIteratorOptimized unity-stride specialization is
// optimized for dgrad problems with stride = {1, 1}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dDgradOutputGradientIteratorOptimizedParams;
private:
Conv2dDgradOutputGradientIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (r, s)
int filter_r_;
int filter_s_;
int filter_k_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized(
Conv2dDgradOutputGradientIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_h[ThreadMap::Iterations::kStrided];
int offset_w[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_nhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_nhw / (problem_size_.H * problem_size_.W);
// int residual = offset_nhw % (problem_size_.H * problem_size_.W);
//
// offset_h[s] = residual / problem_size_.W;
// offset_w[s] = residual % problem_size_.W;
//
int residual;
params_.hw_divmod(offset_n[s], residual, offset_nhw);
params_.w_divmod(offset_h[s], offset_w[s], residual);
TensorCoord coord = at_(offset_n[s], offset_h[s], offset_w[s], 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
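    // Build the per-thread validity bitmasks: bit r of masks_[..][0] records whether the batch
    // index and the implied output row P are in range, and bit s of masks_[..][1] records whether
    // the implied output column Q is in range. valid() later tests one bit from each mask.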
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int p = offset_h[s_idx] + problem_size_.pad_h - r_ * problem_size_.dilation_h;
bool pred = (offset_n[s_idx] < problem_size_.N && p >= 0 && p < problem_size_.P);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int q = offset_w[s_idx] + problem_size_.pad_w - s_ * problem_size_.dilation_w;
bool pred = (q >= 0 && q < problem_size_.Q);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_k_ + v_idx * AccessType::kElements >= problem_size.K);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the output gradient tensor Dy that corresponds to
  /// activation coordinate (n, h, w) and filter position (k, r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int h, int w, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int p = h + problem_size_.pad_h - r * problem_size_.dilation_h;
int q = w + problem_size_.pad_w - s * problem_size_.dilation_w;
return TensorCoord(n, p, q, filter_k_);
}
  /// Adds a byte offset to each of the per-access pointers
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size_.K);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
}
  /// Clears the predicates for the given vector access index
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
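  /// Returns true if the current coordinate is within the output tensor Dy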
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// This is specialized for unit stride
if (problem_size.stride() != MatrixCoord({1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorNotSupported;
}
// Limit on filter size
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 10228
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity
>
class Conv3dDgradFilterTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = StrideSupport_;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Parameters structure
//
struct Params : Conv3dDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv3dDgradFilterIteratorOptimizedParams const &base):
Conv3dDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
):
Conv3dDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
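  // Illustrative construction (hypothetical names): given a Conv3dProblemSize `problem` and the
  // filter tensor's TensorNDHWC layout `layout_w`, host code builds the iterator parameters as
  //   Params params(problem, layout_w);
  // which precomputes TRS and the pointer increments (inc_next_trs, inc_next_k, filter_k_delta)
  // consumed by advance().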
private:
Conv3dDgradFilterIteratorOptimizedParams const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_trs_;
int filter_k_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorOptimized(
Conv3dDgradFilterIteratorOptimizedParams const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_(0),
filter_trs_(0),
filter_k_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
uint32_t pred = ((filter_k < problem_size_.K && filter_c < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_ |= (pred << pred_idx);
}
}
pointer_ += (
filter_k_ * params.layout.stride()[3] + column
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_trs;
// moves to the next tile
++filter_trs_;
if (filter_trs_ == params_.TRS) {
filter_trs_ = 0;
next = params_.inc_next_k;
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
predicates_ = (predicates_ & (~kClearMask));
}
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_ & (1u << pred_idx));
}
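  // Note (illustrative): the predicate bit index is c + s * Iterations::kContiguous, so with
  // Iterations::kContiguous = 4 and Iterations::kStrided = 2 the iterator uses 8 bits of
  // predicates_, and clearing the K bound for strided iteration s = 1 masks bits 4..7
  // (kClearMask = 0b11110000), which is exactly what advance() computes.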
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 3194
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Interface between a CUTLASS device-wide operator and CUDA.
*/
#pragma once
#include <cuda_runtime_api.h>
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/platform/platform.h"
#if ! defined(__CUDACC_RTC__)
#include <cstdio>
#endif
#if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)))
# define CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Macro-level guard for CUDA Host Adapter
//
#if !defined(CUTLASS_ENABLE_CUDA_HOST_ADAPTER)
#define CUTLASS_ENABLE_CUDA_HOST_ADAPTER false
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This class defines an object which abstracts interactions between the CUTLASS device-wide GEMM and
/// CUDA. The intention is to enable CUTLASS to be used with both the CUDA Runtime API and CUDA Driver API.
struct CudaHostAdapter {
/// Limit the number of kernels
static constexpr int32_t kMaximumKernelCount = 4;
/// Maximum cluster size
static constexpr int MaxClusterSize = 32;
//
// Data members
//
/// Handles
void *kernel_handles[kMaximumKernelCount];
int32_t kernel_count = 0;
//
// Methods
//
/// Ctor
CudaHostAdapter() = default;
/// Dtor
virtual ~CudaHostAdapter() {}
/// Copy Ctor
inline CudaHostAdapter(const CudaHostAdapter & rhs):
kernel_count(rhs.kernel_count)
{
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
}
/// Copy Assignment
inline CudaHostAdapter& operator=(const CudaHostAdapter & rhs) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
kernel_count = rhs.kernel_count;
return *this;
}
/// Move ctor
inline CudaHostAdapter(CudaHostAdapter && rhs):
kernel_count(rhs.kernel_count)
{
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
}
/// Move assignment
inline CudaHostAdapter& operator=(CudaHostAdapter && rhs) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count < kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
kernel_count = rhs.kernel_count;
return *this;
}
/// Ctor
inline CudaHostAdapter(
void **kernel_handles_,
int32_t kernel_count_
):
kernel_count(kernel_count_)
{
CUTLASS_ASSERT(kernel_count >= 0);
for (int32_t i = 0; i < kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = kernel_handles_[i];
}
}
/// Returns true if the CudaHostAdapter is empty (kernel_count == 0)
inline bool empty() const { return !kernel_count; }
/// Returns kernel_count
inline size_t size() const { return static_cast<size_t>(kernel_count); }
/// Queries the occupancy of a kernel
virtual Status query_occupancy(
int32_t *device_sms,
int32_t *sm_occupancy,
int32_t kernel_index,
int32_t thread_count,
int32_t smem_size) const = 0;
/// Launches a kernel without using Threadblock Clusters.
virtual Status launch(
dim3 const grid_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void** kernel_params,
int32_t kernel_index) const = 0;
/// Launches a kernel using the CUDA Extensible Launch API and Threadblock Clusters.
virtual Status launch(
dim3 const grid_dims,
dim3 const cluster_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void** kernel_params,
int32_t kernel_index) const = 0;
protected:
/**
* Fills a buffer in Global Memory with a byte sequence copied from host memory.
   * This function can be overridden to dispatch to the appropriate cuMemsetD*Async API
*/
virtual Status memsetDeviceImpl(
void* destination, ///< Device memory pointer to be filled
void const* fill_value, ///< Value to be filled in the buffer
size_t fill_size, ///< Size of the data type to be used for filling the buffer
size_t count, ///< Number of elements of size fill_size
cudaStream_t stream) const = 0;
public:
/// Fills a buffer in Global Memory with a byte sequence copied from host memory
template<class FillValueType>
Status memsetDevice(
void* destination,
FillValueType fill_value,
size_t count,
cudaStream_t stream) const
{
return this->memsetDeviceImpl(
destination,
&fill_value,
sizeof(FillValueType),
count,
stream);
}
};
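//
// Illustrative sketch (hypothetical, not part of CUTLASS): a minimal concrete adapter could
// forward the non-cluster launch to the CUDA Runtime API. The class name and the elided
// overrides are placeholders, and error handling is intentionally simplified.
//
//   struct RuntimeCudaHostAdapter : public CudaHostAdapter {
//     Status launch(
//         dim3 const grid_dims, dim3 const block_dims, size_t const smem_size,
//         cudaStream_t cuda_stream, void** kernel_params, int32_t kernel_index) const override {
//       cudaError_t result = cudaLaunchKernel(
//           kernel_handles[kernel_index], grid_dims, block_dims,
//           kernel_params, smem_size, cuda_stream);
//       return (result == cudaSuccess) ? Status::kSuccess : Status::kErrorInternal;
//     }
//     // ... the cluster launch, query_occupancy, and memsetDeviceImpl pure-virtual members
//     // must also be overridden before the type can be instantiated.
//   };
//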
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/cuda_host_adapter.hpp/0 | {
"file_path": "include/cutlass/cuda_host_adapter.hpp",
"repo_id": "include",
"token_count": 2346
} | 28 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/barrier.h"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/fusion/callbacks.hpp"
#include "cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp"
#include "cutlass/detail/layout.hpp"
#include "cutlass/trace.h"
#include "cute/tensor.hpp"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int StagesC_,
int StagesD_,
int FragmentSize_,
bool ReuseSmemC_,
bool DelayTmaStore_,
class CtaTileMNK_, // (CTA_M,CTA_N,CTA_K)
class EpilogueTile_, // (EPI_TILE_M,EPI_TILE_N)
class ElementC_,
class StrideC_,
class ElementD_,
class StrideD_,
class FusionCallbacks_,
class CopyOpG2S_,
class SmemLayoutAtomC_,
class CopyOpS2R_,
class CopyOpS2G_,
class SmemLayoutAtomD_,
class CopyOpR2S_
>
class CollectiveEpilogue<
Sm90TmaWarpSpecialized<StagesC_,StagesD_,FragmentSize_,ReuseSmemC_,DelayTmaStore_>,
CtaTileMNK_,
EpilogueTile_,
ElementC_,
StrideC_,
ElementD_,
StrideD_,
FusionCallbacks_,
CopyOpG2S_,
SmemLayoutAtomC_,
CopyOpS2R_,
CopyOpS2G_,
SmemLayoutAtomD_,
CopyOpR2S_
> {
public:
//
// Type Aliases
//
using DispatchPolicy = Sm90TmaWarpSpecialized<StagesC_,StagesD_,FragmentSize_,ReuseSmemC_,DelayTmaStore_>;
using CtaTileMNK = CtaTileMNK_;
using EpilogueTile = EpilogueTile_;
using FusionCallbacks = FusionCallbacks_;
using ElementC = ElementC_;
using StrideC = StrideC_;
using ElementD = ElementD_;
using StrideD = StrideD_;
using CopyOpG2S = CopyOpG2S_;
using SmemLayoutAtomC = SmemLayoutAtomC_;
using CopyOpS2R = CopyOpS2R_;
using CopyOpS2G = CopyOpS2G_;
using SmemLayoutAtomD = SmemLayoutAtomD_;
using CopyOpR2S = CopyOpR2S_;
using ThreadEpilogueOp = typename epilogue::fusion::FusionCallbacksTraits<FusionCallbacks>::Operation;
using GmemTiledCopyC = CopyOpG2S;
using GmemTiledCopyD = CopyOpS2G;
static_assert(!is_layout<EpilogueTile>::value && is_tuple<EpilogueTile>::value, "EpilogueTile must be a cute::Tile or cute::Shape");
static_assert(cute::rank(CtaTileMNK{}) == 3, "CtaTileMNK must be rank-3: [CTA_M, CTA_N, CTA_K]");
static_assert(cute::rank(EpilogueTile{}) == 2, "EpilogueTile must be rank-2: [EPI_TILE_M, EPI_TILE_N]");
static_assert(size<0>(CtaTileMNK{}) % size<0>(shape(EpilogueTile{})) == 0, "EPI_TILE_M must divide CTA_M");
static_assert(size<1>(CtaTileMNK{}) % size<1>(shape(EpilogueTile{})) == 0, "EPI_TILE_N must divide CTA_N");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]");
private:
constexpr static bool is_source_supported = not cute::is_void_v<ElementC>;
constexpr static bool is_destination_supported = not cute::is_void_v<ElementD>;
using SmemElementD = cute::conditional_t<not is_destination_supported,fusion::get_element_aux_t<FusionCallbacks>, ElementD>;
static_assert(not cute::is_void_v<SmemElementD>, "SmemElementD is void");
using SmemElementC = cute::conditional_t<not is_source_supported,SmemElementD,ElementC>; // prevents void ref breakages
constexpr static int StagesC = StagesC_;
constexpr static int StagesD = StagesD_;
constexpr static bool ReuseSmemC = ReuseSmemC_ and is_destination_supported;
constexpr static bool DelayTmaStore = DelayTmaStore_;
constexpr static bool is_m_major_C = detail::is_m_major<StrideC>();
constexpr static bool is_m_major_D = detail::is_m_major<StrideD>();
constexpr static bool is_im2col_C = cute::is_same_v<CopyOpG2S, SM90_TMA_LOAD_IM2COL>;
constexpr static bool is_im2col_D = cute::is_same_v<CopyOpS2G, SM90_TMA_STORE_IM2COL>;
using SmemLayoutC = decltype(tile_to_shape(
SmemLayoutAtomC{},
make_shape(size<0>(EpilogueTile{}), size<1>(EpilogueTile{}), Int<StagesC>{}),
cute::conditional_t<is_m_major_C, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
using SmemLayoutD = decltype(tile_to_shape(
SmemLayoutAtomD{},
make_shape(size<0>(EpilogueTile{}), size<1>(EpilogueTile{}), Int<ReuseSmemC ? StagesC : StagesD>{}),
cute::conditional_t<is_m_major_D, Step<_2,_1,_3>, Step<_1,_2,_3>>{} ));
constexpr static bool support_smem_reuse = is_source_supported && is_destination_supported && StagesD <= StagesC
&& cosize(take<0,2>(SmemLayoutC{})) == cosize(take<0,2>(SmemLayoutD{}));
static_assert(not (ReuseSmemC && not support_smem_reuse), "Smem reuse requirements not met");
constexpr static size_t SmemAlignmentD = cutlass::detail::alignment_for_swizzle(SmemLayoutD{});
constexpr static size_t SmemAlignmentC = cutlass::detail::alignment_for_swizzle(SmemLayoutC{});
using EmptyType = cute::tuple<>;
using SmemCStorage = cute::conditional_t<is_source_supported and (not ReuseSmemC),
array_aligned<SmemElementC, size(SmemLayoutC{}), SmemAlignmentC>,
EmptyType>;
using SmemDStorage = cute::conditional_t<is_destination_supported,
array_aligned<SmemElementD, size(SmemLayoutD{}), SmemAlignmentD>,
EmptyType>;
struct TensorStorageImpl: cute::tuple<SmemCStorage, SmemDStorage> {
using Base = cute::tuple<SmemCStorage, SmemDStorage>;
constexpr decltype(auto)
smem_C() {
return cute::get<0>(static_cast<Base &>(*this));
}
constexpr decltype(auto)
smem_D() {
return cute::get<1>(static_cast<Base &>(*this));
}
using FusionStorage = typename FusionCallbacks::SharedStorage;
FusionStorage thread;
};
public:
// TMA pipeline for loading C
using LoadPipeline = cutlass::PipelineTransactionAsync<StagesC>;
using LoadPipelineState = cutlass::PipelineState<StagesC>;
constexpr static uint32_t TmaTransactionBytes =
(size(take<0,2>(SmemLayoutC{})) * static_cast<uint32_t>(sizeof_bits<SmemElementC>::value)) / 8;
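  // e.g. a 64x32 EpilogueTile of half_t source data implies 64 * 32 * 2 = 4096 transaction
  // bytes per pipeline stage (illustrative values).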
// TMA pipeline for storing D
using StorePipeline = cute::conditional_t<ReuseSmemC,
cutlass::PipelineTmaStore<StagesC, StagesD-1>,
cutlass::PipelineTmaStore<StagesD>>;
using StorePipelineState = cutlass::PipelineState<ReuseSmemC ? StagesC : StagesD>;
struct SharedStorage {
using TensorStorage = TensorStorageImpl;
TensorStorage tensors;
using PipelineStorage = typename LoadPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side epilogue arguments
struct Arguments {
typename FusionCallbacks::Arguments thread{};
ElementC const* ptr_C;
StrideC dC;
ElementD const* ptr_D;
StrideD dD;
};
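  // Illustrative host-side population (hypothetical shapes and stride types): for an M x N x L
  // output with a unit-stride N mode and batch stride M * N, a typical setup looks like
  //   Arguments args{{}, ptr_C, cute::make_stride(int64_t(N), cute::Int<1>{}, int64_t(M) * N),
  //                      ptr_D, cute::make_stride(int64_t(N), cute::Int<1>{}, int64_t(M) * N)};
  // where the first member carries the fusion callback arguments.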
// Device side epilogue params
struct Params {
using TMA_C = decltype(make_tma_copy(
CopyOpG2S{},
make_tensor(make_gmem_ptr(static_cast<SmemElementC const*>(nullptr)),
repeat_like(StrideC{}, int32_t(0)), StrideC{}),
take<0,2>(SmemLayoutC{}),
EpilogueTile{},
_1{}));
using TMA_D = decltype(make_tma_copy(
CopyOpS2G{},
make_tensor(make_gmem_ptr(static_cast<SmemElementD const*>(nullptr)),
repeat_like(StrideD{}, int32_t(0)), StrideD{}),
take<0,2>(SmemLayoutD{}),
EpilogueTile{},
_1{}));
typename FusionCallbacks::Params thread{};
TMA_C tma_load_c;
TMA_D tma_store_d;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
ProblemShape const& problem_shape,
Arguments const& args,
[[maybe_unused]] void* workspace) {
    // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M, N, K, L] = problem_shape_MNKL;
    // For fprop/dgrad kernels, problem shape M is multimodal; keep it as-is for im2col TMA and linearize it under tiled (non-im2col) mode
auto M_C = conditional_return<is_im2col_C>(M, size(M));
auto M_D = conditional_return<is_im2col_D>(M, size(M));
typename Params::TMA_C tma_load_c = {};
if constexpr (is_source_supported) {
Tensor tensor_c = make_tensor(make_gmem_ptr(args.ptr_C), make_layout(make_shape(M_C,N,L), args.dC));
tma_load_c = make_tma_copy(CopyOpG2S{}, tensor_c, take<0,2>(SmemLayoutC{}), EpilogueTile{}, _1{});
}
typename Params::TMA_D tma_store_d;
if constexpr (is_destination_supported) {
Tensor tensor_d = make_tensor(make_gmem_ptr(args.ptr_D), make_layout(make_shape(M_D,N,L), args.dD));
tma_store_d = make_tma_copy(CopyOpS2G{}, tensor_d, take<0,2>(SmemLayoutD{}), EpilogueTile{}, _1{});
}
return {
FusionCallbacks::to_underlying_arguments(problem_shape, args.thread, workspace),
tma_load_c,
tma_store_d
};
}
template <class ProblemShape>
static size_t
get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) {
return FusionCallbacks::get_workspace_size(problem_shape, args.thread);
}
template <class ProblemShape>
static cutlass::Status
initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream,
CudaHostAdapter* cuda_adapter = nullptr) {
return FusionCallbacks::initialize_workspace(problem_shape, args.thread, workspace, stream, cuda_adapter);
}
template <class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
constexpr int tma_alignment_bits = 128;
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
if constexpr (is_destination_supported) {
constexpr int min_tma_aligned_elements_D = tma_alignment_bits / cutlass::sizeof_bits<ElementD>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_D>(cute::make_shape(M,N,L), StrideD{});
}
if constexpr (not cute::is_void_v<ElementC>) {
constexpr int min_tma_aligned_elements_C = tma_alignment_bits / cutlass::sizeof_bits<ElementC>::value;
implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_C>(cute::make_shape(M,N,L), StrideC{});
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_load_pipe_increment(TileShapeMNK tile_shape_MNK) {
// Compute number of epilogue subtiles
return size<1>(zipped_divide(make_layout(take<0,2>(tile_shape_MNK)), EpilogueTile{}));
}
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_store_pipe_increment(TileShapeMNK tile_shape_MNK) {
return get_load_pipe_increment(tile_shape_MNK);
}
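  // Example (illustrative): with CtaTileMNK = (128, 128, K) and EpilogueTile = (64, 32), the
  // zipped divide yields (128/64) * (128/32) = 8 epilogue subtiles, so both the load and store
  // pipelines are advanced 8 times per output tile.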
/// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance
CUTLASS_DEVICE
static void
prefetch_tma_descriptors(Params const& epilogue_params) {
if constexpr (is_source_supported) {
cute::prefetch_tma_descriptor(epilogue_params.tma_load_c.get_tma_descriptor());
}
if constexpr (is_destination_supported) {
cute::prefetch_tma_descriptor(epilogue_params.tma_store_d.get_tma_descriptor());
}
}
CUTLASS_HOST_DEVICE
CollectiveEpilogue(Params const& params_, TensorStorage& shared_tensors)
: params(params_), fusion_callbacks(params_.thread, shared_tensors.thread) {}
CUTLASS_DEVICE
bool
is_producer_load_needed() const {
return fusion_callbacks.is_producer_load_needed();
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class TiledMma
>
CUTLASS_DEVICE auto
load(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
int subtile_idx=-1) {
using namespace cute;
// Indexing variables
auto [M, N, K, L] = problem_shape_mnkl;
auto [m_coord, n_coord, k_coord, l_coord] = tile_coord_mnkl;
    // Under im2col mode, the TMA tensor C has only two modes (M, N), so it is
    // local-tiled with only (m_coord, n_coord).
auto coord_shape = conditional_return<is_im2col_C>(
make_coord(m_coord, n_coord),
make_coord(m_coord, n_coord, l_coord));
// Tile residue
auto residue_mn = make_coord(M,N);
// Represent the full source tensor, slice to get the tile this CTA is currently responsible for
Tensor mC_mn = params.tma_load_c.get_tma_tensor(make_shape(M,N,L)); // (M,N,L)
Tensor mC = coalesce(mC_mn, take<0,2>(CtaTileMNK{}));
Tensor gC = local_tile(mC, take<0,2>(CtaTileMNK{}), coord_shape); // (CTA_M,CTA_N)
// Apply epilogue subtile, get matching smem tensor
SmemElementC* ptr_sC = nullptr;
if constexpr (is_source_supported) {
if constexpr (ReuseSmemC) {
ptr_sC = reinterpret_cast<SmemElementC*>(shared_tensors.smem_D().data());
} else {
ptr_sC = shared_tensors.smem_C().data();
}
}
Tensor gC_epi = flat_divide(gC, EpilogueTile{}); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
Tensor sC_epi = make_tensor(make_smem_ptr(ptr_sC), SmemLayoutC{}); // (EPI_TILE_M,EPI_TILE_N,PIPE_C)
// Prepare the thread(b)lock's (G)mem to (S)mem TMA tiled copy (bGS_)
ThrCopy thrblk_g2s = params.tma_load_c.get_slice(Int<0>{});
Tensor bGS_gC = thrblk_g2s.partition_S(gC_epi); // (G2S,G2S_M,G2S_N,EPI_M,EPI_N)
Tensor bGS_sC = thrblk_g2s.partition_D(sC_epi); // (G2S,G2S_M,G2S_N,PIPE_C)
// Get the fusion callbacks for the producer load warp
auto pld_args = cutlass::epilogue::fusion::detail::ProducerLoadArgs{
problem_shape_mnkl,
CtaTileMNK{},
tile_coord_mnkl,
residue_mn,
EpilogueTile{},
thread_idx
};
auto pld_callbacks = fusion_callbacks.get_producer_load_callbacks(pld_args);
bool is_C_load_needed = is_source_supported && fusion_callbacks.is_C_load_needed();
// Predication for TMA load (one thread issues TMA load)
bool issue_tma_load = cute::elect_one_sync();
// Acquire the lock for the first stage
uint64_t* tma_barrier = load_pipeline.producer_get_barrier(load_pipe_producer_state);
load_pipeline.producer_acquire(load_pipe_producer_state);
// Pre-loop fusion callback entry point
pld_callbacks.begin(tma_barrier, load_pipe_producer_state.count(), issue_tma_load);
CUTLASS_PRAGMA_UNROLL
for (int epi_n = 0; epi_n < size<3>(gC_epi); ++epi_n) {
CUTLASS_PRAGMA_UNROLL
for (int epi_m = 0; epi_m < size<2>(gC_epi); ++epi_m) {
if (subtile_idx != -1 && (epi_n * static_cast<int>(size<2>(gC_epi)) + epi_m) != subtile_idx) {
continue;
}
// Acquire the lock for this stage
constexpr uint16_t mcast_mask = 0;
uint64_t* tma_barrier = load_pipeline.producer_get_barrier(load_pipe_producer_state);
load_pipeline.producer_acquire(load_pipe_producer_state);
// Loop fusion callback entry point
pld_callbacks.step(tma_barrier, epi_m, epi_n, load_pipe_producer_state.count(), issue_tma_load);
// Execute the TMA load for C if needed
if (issue_tma_load && is_C_load_needed) {
copy(params.tma_load_c.with(*tma_barrier, mcast_mask),
bGS_gC(_,_,_,epi_m,epi_n), bGS_sC(_,_,_,load_pipe_producer_state.index()));
load_pipeline.producer_expect_transaction(load_pipe_producer_state);
}
// Commit TMA loads for this stage and release the lock
load_pipeline.producer_commit(load_pipe_producer_state);
++load_pipe_producer_state;
}
}
// Post-loop fusion callback entry point
pld_callbacks.end();
return load_pipe_producer_state;
}
CUTLASS_DEVICE auto
load_tail(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state) {
bool issue_tma_load = cute::elect_one_sync();
if (issue_tma_load) {
load_pipeline.producer_tail(load_pipe_producer_state);
}
return load_pipe_producer_state;
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class AccEngine, class AccLayout,
class TiledMma
>
CUTLASS_DEVICE auto
store(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
cute::Tensor<AccEngine,AccLayout> accumulators,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
int subtile_idx=-1) {
using namespace cute;
using ElementAccumulator = typename AccEngine::value_type;
using ElementCompute_ = typename epilogue::fusion::FusionCallbacksTraits<FusionCallbacks>::ElementCompute;
using ElementCompute = cute::conditional_t<cute::is_void_v<ElementCompute_>,ElementAccumulator,ElementCompute_>;
static_assert(is_rmem<AccEngine>::value, "Accumulator must be RF resident.");
static_assert(rank(AccLayout{}) == 3, "Accumulator must be MMA-partitioned: (MMA,MMA_M,MMA_N)");
static_assert(rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<TileShapeMNK>::value, "TileShapeMNK must be static");
static_assert(rank(TileShapeMNK{}) == 3, "TileShapeMNK must be rank 3");
static_assert(rank(TileCoordMNKL{}) == 4, "TileCoordMNKL must be rank 4");
// Indexing variables
auto [M, N, K, L] = problem_shape_mnkl;
auto [m_coord, n_coord, k_coord, l_coord] = tile_coord_mnkl;
auto mma_tile_m = tile_size<0>(tiled_mma);
auto mma_tile_n = tile_size<1>(tiled_mma);
auto epi_tile_m = size<0>(EpilogueTile{});
auto epi_tile_n = size<1>(EpilogueTile{});
    // Under im2col mode, the TMA tensor D has only two modes (M, N), so it is
    // local-tiled with only (m_coord, n_coord).
auto coord_shape = conditional_return<is_im2col_D>(
make_coord(m_coord, n_coord),
make_coord(m_coord, n_coord, l_coord));
// Represent the full output tensor, slice to get the tile this CTA is responsible for
Tensor mD_mn = params.tma_store_d.get_tma_tensor(make_shape(M,N,L)); // (M,N,L)
Tensor mD = coalesce(mD_mn, take<0,2>(CtaTileMNK{}));
Tensor gD = local_tile(mD, take<0,2>(CtaTileMNK{}), coord_shape); // (CTA_M,CTA_N)
// Apply epilogue subtiling
Tensor gD_epi = flat_divide(gD, EpilogueTile{}); // (EPI_TILE_M,EPI_TILE_N,EPI_M,EPI_N)
// Construct the corresponding pipelined smem tensors
SmemElementC* ptr_sC = nullptr;
if constexpr (is_source_supported) {
if constexpr (ReuseSmemC) {
ptr_sC = reinterpret_cast<SmemElementC*>(shared_tensors.smem_D().data());
} else {
ptr_sC = shared_tensors.smem_C().data();
}
}
SmemElementD* ptr_sD = nullptr;
if constexpr (is_destination_supported) {
ptr_sD = shared_tensors.smem_D().data();
}
Tensor sC_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(ptr_sC), SmemLayoutC{})); // (EPI_TILE_M,EPI_TILE_N,PIPE_C)
Tensor sD_epi = cute::as_position_independent_swizzle_tensor(
make_tensor(make_smem_ptr(ptr_sD), SmemLayoutD{})); // (EPI_TILE_M,EPI_TILE_N,PIPE_D)
// Get the smallest tiled copy we can use to retile the accumulators
using CopyAtomC = Copy_Atom<SM90_U32x4_STSM_N, cutlass::half_t>;
TiledCopy tiled_copy_C_atom = make_tiled_copy_C_atom(CopyAtomC{}, tiled_mma);
// (t)hread-partition for (r)egister to (s)mem copy (tRS_)
TiledCopy tiled_r2s = make_tiled_copy_S(Copy_Atom<CopyOpR2S,SmemElementD>{}, tiled_copy_C_atom);
ThrCopy thread_r2s = tiled_r2s.get_slice(thread_idx);
Tensor tRS_rAcc = thread_r2s.retile_S(accumulators); // ((R2S,R2S_V),MMA_M,MMA_N)
Tensor tRS_sD = thread_r2s.partition_D(sD_epi); // (R2S,R2S_M,R2S_N,PIPE_D)
// Allocate D registers
Layout tRS_rD_layout = make_layout(take<0,3>(shape(thread_r2s.partition_S(sD_epi))));
Tensor tRS_rD = make_tensor<SmemElementD>(tRS_rD_layout); // (R2S,R2S_M,R2S_N)
// Vectorized fragment view
constexpr int FragmentSize = DispatchPolicy::FragmentSize;
Tensor tRS_rAcc_frg = recast<Array<ElementAccumulator, FragmentSize>>(tRS_rAcc);
Tensor tRS_rD_frg = recast<Array<SmemElementD , FragmentSize>>(tRS_rD);
CUTE_STATIC_ASSERT(size<0>(tRS_rAcc) % FragmentSize == 0, "Fragment size does not vectorize properly");
// (t)hread-partition for (s)mem to (r)egister copy (tSR_)
TiledCopy tiled_s2r = make_tiled_copy_S(Copy_Atom<CopyOpS2R, SmemElementC>{}, tiled_copy_C_atom);
ThrCopy thread_s2r = tiled_s2r.get_slice(thread_idx);
Tensor tSR_sC = thread_s2r.partition_S(sC_epi); // (S2R,S2R_M,S2R_N,PIPE_C)
Layout tSR_rC_layout = thread_s2r.retile_D(tRS_rD).layout(); // (S2R,S2R_M,S2R_N)
// Allocate C registers
// If C smem load is a non-vectorized dst(i) = src(i) then we can allocate C registers directly in the compute type
// to eliminate some redundant pack+unpack instruction sequences for sub-word types
constexpr bool IsDirectS2R = cute::is_same_v<CopyOpS2R, AutoVectorizingCopyWithAssumedAlignment<128>>
&& decltype(max_common_vector(tSR_rC_layout, tSR_sC.layout()))::value <= 1;
using RegisterElementC = cute::conditional_t<IsDirectS2R, ElementCompute, SmemElementC>;
Tensor tRS_rC = make_tensor<RegisterElementC>(tRS_rD_layout); // (R2S,R2S_M,R2S_N)
Tensor tSR_rC = thread_s2r.retile_D(tRS_rC); // (S2R,S2R_M,S2R_N)
// thread(b)lock-partition for (s)mem to (g)mem copy (bSG_)
ThrCopy thrblk_s2g = params.tma_store_d.get_slice(Int<0>{});
Tensor bSG_sD = thrblk_s2g.partition_S(sD_epi); // (S2G,S2G_M,S2G_N,PIPE_D)
Tensor bSG_gD = thrblk_s2g.partition_D(gD_epi); // (S2G,S2G_M,S2G_N,EPI_M,EPI_N)
// OOB predication for tile quantization "residue"
Tensor mD_crd = make_identity_tensor(make_shape(M,N));
Tensor cD = local_tile(mD_crd, take<0,2>(CtaTileMNK{}), make_coord(m_coord, n_coord));
Tensor tRS_cD = thread_r2s.partition_S(flat_divide(cD, EpilogueTile{}));
auto residue_mn = make_coord(M,N);
CUTE_STATIC_ASSERT(mma_tile_m == epi_tile_m, "EPI_TILE_M must equal MMA_TILE_M");
CUTE_STATIC_ASSERT(mma_tile_n % epi_tile_n == 0, "EPI_TILE_N must divide MMA_TILE_N");
// Get the fusion callbacks for the consumer store warps
constexpr bool RefSrc = true; // Register tensors reference R2S copy src layout
auto cst_args = cutlass::epilogue::fusion::detail::ConsumerStoreArgs{
problem_shape_mnkl,
CtaTileMNK{},
tile_coord_mnkl,
residue_mn,
EpilogueTile{},
tiled_copy_C_atom,
thread_idx,
cD,
tRS_cD,
tRS_rC
};
auto cst_callbacks = fusion_callbacks.get_consumer_store_callbacks<RefSrc>(cst_args);
bool is_producer_load_needed = fusion_callbacks.is_producer_load_needed();
bool is_C_load_needed = is_source_supported && fusion_callbacks.is_C_load_needed();
// Thread synchronizer for previously issued waits or fences
// to ensure visibility of smem reads/writes to threads or TMA unit
auto synchronize = [&] () { cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::EpilogueBarrier); };
// Predication for TMA store (one warp issues TMA store)
bool issue_tma_store = (thread_idx / NumThreadsPerWarp) == 0;
// In the reuse smem configuration we have StagesC smem buffers and at most StagesD committed TMA stores in flight.
// The TMA store pipeline producer acquire returns when at most StagesD-1 committed stores are in-flight, so we can
// only guarantee store completion after StagesD iterations, then we can begin issuing releases on the smem buffer locks.
// store_pipe_producer_state tracks the acquire and load_pipe_consumer_state tracks the release, in circular buffer fashion.
LoadPipelineState load_wait_state = load_pipe_consumer_state;
if constexpr (ReuseSmemC) {
load_wait_state = store_pipe_producer_state;
load_wait_state.phase_ ^= 1;
}
// We can delay issue of TMA store by one iteration to achieve better interleaving of non-TMA instructions
// Sync requirements of smem reuse may preclude this optimization
// Delayed stores cause delayed stage releases which causes deadlock when StagesC == StagesD
int epi_m_prev = 0, epi_n_prev = 0;
static_assert(not (DelayTmaStore and ReuseSmemC and StagesC == StagesD), "This TMA epilogue configuration will deadlock");
// The TMA store sequence for one subtile iteration
auto tma_store_fn = [&] (int epi_m, int epi_n) {
// Write the tile from smem to gmem with TMA
cutlass::arch::fence_view_async_shared(); // ensure smem writes are visible to TMA
synchronize(); // ensure all threads have issued their async fence
if constexpr (is_destination_supported) {
if (issue_tma_store) {
copy(params.tma_store_d, bSG_sD(_,_,_,store_pipe_producer_state.index()), bSG_gD(_,_,_,epi_m,epi_n));
}
}
// Post async fence, pre TMA commit callback entry point
cst_callbacks.tma_store(epi_m, epi_n, store_pipe_producer_state.count(), issue_tma_store);
// Commit the TMA stores for this stage
if (issue_tma_store) {
store_pipeline.producer_commit(store_pipe_producer_state);
}
++store_pipe_producer_state;
++issued_stores;
// Wait for the next smem buffer to be available
if (issue_tma_store) {
store_pipeline.producer_acquire(store_pipe_producer_state);
}
synchronize();
if constexpr (ReuseSmemC) {
// producer_acquire returns when at most StagesD-1 committed stores are pending
bool store_finished = issued_stores > StorePipeline::UnacquiredStages;
// Let dma warp know earliest smem buffer is consumed and empty after StagesD producer commits
if (store_finished) {
if (is_producer_load_needed) {
load_pipeline.consumer_release(load_pipe_consumer_state);
}
++load_pipe_consumer_state;
}
}
};
//
// BEGIN EPILOGUE
//
// Pre-loop fusion callback entry point
cst_callbacks.begin();
// For each output tile
CUTLASS_PRAGMA_UNROLL
for (int epi_n = 0; epi_n < size<3>(gD_epi); ++epi_n) {
CUTLASS_PRAGMA_UNROLL
for (int epi_m = 0; epi_m < size<2>(gD_epi); ++epi_m) {
bool is_first_iteration = epi_m == 0 && epi_n == 0;
bool is_last_iteration = epi_m == size<2>(gD_epi)-1 && epi_n == size<3>(gD_epi)-1;
if (subtile_idx != -1 && (epi_n * static_cast<int>(size<2>(gD_epi)) + epi_m) != subtile_idx) {
continue;
}
// The current tile in accumulator
int mma_m = epi_m;
int mma_n = (epi_n * size<1>(EpilogueTile{})) / mma_tile_n;
Tensor tRS_rAcc_frg_mn = tRS_rAcc_frg(_,mma_m,mma_n);
if (is_producer_load_needed) {
// Wait for the producer load to fill smem
load_pipeline.consumer_wait(load_wait_state);
if (is_C_load_needed) {
// Copy source tile from smem to register
copy(tiled_s2r, tSR_sC(_,_,_,load_wait_state.index()), tSR_rC);
}
}
// First loop fusion callback entry point
cst_callbacks.previsit(epi_m, epi_n, load_wait_state.count(), is_producer_load_needed);
if (is_producer_load_needed) {
if constexpr (not ReuseSmemC) {
// Let producer load warp know smem buffers are consumed and empty
cutlass::arch::fence_view_async_shared();
load_pipeline.consumer_release(load_pipe_consumer_state);
++load_pipe_consumer_state;
}
++load_wait_state;
}
// Vectorized fragment loop with visitor callback entry point
int epi_n_in_mma = epi_n % (mma_tile_n / epi_tile_n);
int r2s_v = epi_n_in_mma * size(tRS_rD_frg);
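        // e.g. with MMA_TILE_N = 64 and EPI_TILE_N = 32, each MMA tile spans two epilogue subtiles,
        // so r2s_v selects either the first or the second half of the accumulator fragments for
        // this subtile (illustrative values).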
CUTLASS_PRAGMA_UNROLL
for (int epi_v = 0; epi_v < size(tRS_rD_frg); ++epi_v) {
tRS_rD_frg(epi_v) = cst_callbacks.visit(tRS_rAcc_frg_mn(r2s_v + epi_v), epi_v, epi_m, epi_n);
}
// The latest we can delay the TMA store is right before the smem store of the next iteration
// since the current TMA store needs to be committed before we can acquire the next smem buffer
if constexpr (DelayTmaStore) {
// Issue TMA stores for the previous subtile
if (not is_first_iteration and subtile_idx == -1) {
tma_store_fn(epi_m_prev, epi_n_prev);
}
epi_m_prev = epi_m;
epi_n_prev = epi_n;
}
// Smem reduction callback entry point using current store buffer for workspace
cst_callbacks.reduce(sD_epi(_,_,store_pipe_producer_state.index()),
synchronize, epi_m, epi_n, is_last_iteration);
// Copy tile from register to smem
if constexpr (is_destination_supported) {
copy(tiled_r2s, tRS_rD, tRS_sD(_,_,_,store_pipe_producer_state.index()));
}
// Post reduction, pre TMA store callback entry point
constexpr bool issue_smem_store = true; // No smem store predication
cst_callbacks.postreduce(epi_m, epi_n, store_pipe_producer_state.count(), issue_smem_store);
if constexpr (not DelayTmaStore) {
// Issue TMA stores for this subtile
tma_store_fn(epi_m, epi_n);
}
} // for epi_m
} // for epi_n
if constexpr (DelayTmaStore) {
// Issue TMA stores for the last subtile
tma_store_fn(epi_m_prev, epi_n_prev);
}
// Post-loop fusion callback entry point
cst_callbacks.end();
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
CUTLASS_DEVICE auto
store_tail(
LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state) {
// wait for all TMA stores to complete
store_pipeline.producer_tail(store_pipe_producer_state);
// reset store counter
issued_stores = 0;
if constexpr (ReuseSmemC) {
if (fusion_callbacks.is_producer_load_needed()) {
// Issue releases on up to StagesD-1 previously issued TMA stores
constexpr int release_stages = cute::min(StorePipeline::UnacquiredStages, get_load_pipe_increment(CtaTileMNK{}));
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < release_stages; ++stage) {
load_pipeline.consumer_release(load_pipe_consumer_state);
++load_pipe_consumer_state;
}
}
}
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
private:
Params const& params;
FusionCallbacks fusion_callbacks;
int issued_stores = 0;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized.hpp",
"repo_id": "include",
"token_count": 15129
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear scaling operations used by epilogues. Values are clamped before
converting to the output element type.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Single source of truth for whether to unroll for `LinearCombinationClamp()`
constexpr bool LinearCombinationClampIsHeavy() {
return false;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements then clamps the output before
/// converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we sometimes use 64 or 32 when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationClamp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
/// Clamping constant value
ElementCompute const kClampMax =
ElementCompute(platform::numeric_limits<ElementOutput>::max());
ElementCompute const kClampMin =
ElementCompute(platform::numeric_limits<ElementOutput>::lowest());
intermediate = max_accumulator(intermediate, kClampMin);
intermediate = min_accumulator(intermediate, kClampMax);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
// Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
/// Clamping constant value
ElementCompute const kClampMax =
ElementCompute(platform::numeric_limits<ElementOutput>::max());
ElementCompute const kClampMin =
ElementCompute(platform::numeric_limits<ElementOutput>::lowest());
intermediate = max_accumulator(intermediate, kClampMin);
intermediate = min_accumulator(intermediate, kClampMax);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
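/////////////////////////////////////////////////////////////////////////////////////////////////

// Usage sketch for LinearCombinationClamp (illustrative only -- the output type, Count, and the
// alpha/beta values below are assumptions for the example, not requirements of this header).
// In a real kernel the epilogue iterators supply the fragments; here they are filled by hand.
namespace example {

CUTLASS_HOST_DEVICE
void linear_combination_clamp_sketch() {

  using Op = LinearCombinationClamp<int8_t, 16, int, float>;

  Op::Params params(1.25f /*alpha*/, 0.5f /*beta*/);
  Op op(params);

  Op::FragmentAccumulator accum;
  Op::FragmentOutput source;

  accum.fill(300);          // alpha * 300 exceeds the int8_t range, so the result is clamped
  source.fill(int8_t(2));

  Op::FragmentOutput d = op(accum, source);   // D = clamp(alpha * Accum + beta * C), in [-128, 127]
  (void)d;
}

} // namespace example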
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Applies a linear combination operator to an array of elements then clamps the output before
/// converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
ScaleType::Kind Scale, ///< Control Alpha and Beta scaling
FloatRoundStyle Round
>
class LinearCombinationClamp<ElementOutput_, Count, int, float, Scale, Round> {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static_assert(
platform::numeric_limits<ElementOutput>::is_integer,
"This elementwise op expects the output to be int.");
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
// Float min-max
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
//
// Convert float => ElementOutput_ with clamping
//
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator) const {
// Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_accumulator;
// Float min-max
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
//
// Convert float => ElementOutput_ with clamping
//
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
#endif // Conditional guards to enable partial specialization for packed integers
////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements then clamps
/// the output before converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Note: The method below applies only when problem_size_K <= 256 for signed int8 GEMM
/// or problem_size_K <= 128 for unsigned int8 GEMM. The default approach is the one
/// above.
/// TODO: Add logic to fall back to the default approach
template <
/// Data type used to load and store tensors
typename ElementOutput_,
/// Number of elements computed per operation
int Count,
/// Control Alpha and Beta scaling
ScaleType::Kind Scale = ScaleType::Default,
/// Rounding mode
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
class FastLinearCombinationClamp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static_assert(
platform::numeric_limits<ElementOutput>::is_integer,
"This elementwise op expects the output to be int.");
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = false;
/// Host-constructable parameters structure
struct Params {
/// scales accumulators
ElementCompute alpha;
/// scales source tensor
ElementCompute beta;
/// pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *alpha_ptr;
/// pointer to source scalar - if not null, loads it from memory
ElementCompute const *beta_ptr;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params()
: alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha, ElementCompute beta)
: alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha)
: alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host
/// memory
CUTLASS_HOST_DEVICE
FastLinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
// Convert source to internal compute numeric type
FastNumericArrayConverter<ElementCompute, ElementOutput, kCount, Round>
source_converter;
FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
// Float min-max
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate =
mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator,
intermediate); // D = alpha * Accum + X
}
/// Clamping constant value
ElementCompute const kClamp =
ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1));
intermediate = max_accumulator(intermediate, -kClamp);
intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1));
// Convert to destination numeric type
FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator) const {
// Convert accumulator to internal compute numeric type
FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
// Float min-max
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator);
}
/// Clamping constant value
ElementCompute const kClamp =
ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1));
intermediate = max_accumulator(intermediate, -kClamp);
intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1));
// Convert to destination numeric type
FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
};
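////////////////////////////////////////////////////////////////////////////////

// Minimal sketch of the symmetric clamp bound used above (the helper and the int8_t example are
// illustrative assumptions; the epilogue itself does not use this function).
namespace example {

template <typename ElementOutput>
CUTLASS_HOST_DEVICE
constexpr int fast_clamp_bound() {
  // Mirrors the kClamp expression in FastLinearCombinationClamp::operator()
  return 1 << (sizeof_bits<ElementOutput>::value - 1);
}

// e.g. an int8_t output is clamped to [-kClamp, kClamp - 1] = [-128, 127]
static_assert(fast_clamp_bound<int8_t>() == 128, "int8_t output clamps to [-128, 127]");

} // namespace example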
////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/thread/linear_combination_clamp.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_clamp.h",
"repo_id": "include",
"token_count": 7901
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination with an elementwise operation
*/
#pragma once
#include "cutlass/half.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/constants.h"
#include "cutlass/fast_math.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementCompute_, ///< Data type returned by this functor
typename ElementAccumulator_, ///< Data type of accumulators
typename ElementSource_, ///< Data type of source tensor
typename ElementTensor_, ///< Data type of additional tensor
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we sometimes use 64 or 32 when there is not enough data to store
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationWithElementwise {
public:
using ElementOutput = ElementSource_;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ElementSource = ElementSource_;
using ElementTensor = ElementTensor_;
static bool const kIsHeavy = true;
static int const kCount = Count;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentTensor = Array<ElementTensor, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
bool participates_in_reduction_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationWithElementwise(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
participates_in_reduction_ = true;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Returns true if the threadblock computes the reduction
CUTLASS_HOST_DEVICE
bool participates_in_reduction() const {
return participates_in_reduction_;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
// Avoid computing the reduction if this isn't the final Split-K slice
participates_in_reduction_ = false;
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentSource const &source,
FragmentTensor const &tensor) const {
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
return intermediate;
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentTensor const &tensor) const {
// Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
return intermediate;
}
};
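/////////////////////////////////////////////////////////////////////////////////////////////////

// Usage sketch for LinearCombinationWithElementwise (illustrative only -- the element types,
// Count, and scalar values are assumptions for the example). Note that the functor returns an
// unconverted FragmentCompute so that a separate elementwise stage can run before the final
// conversion to the output element type.
namespace example {

CUTLASS_HOST_DEVICE
void linear_combination_with_elementwise_sketch() {

  using Op = LinearCombinationWithElementwise<
      float,     // ElementCompute
      float,     // ElementAccumulator
      half_t,    // ElementSource
      half_t,    // ElementTensor
      8>;        // Count

  Op::Params params(1.0f /*alpha*/, 1.0f /*beta*/);
  Op op(params);

  Op::FragmentAccumulator accum;
  Op::FragmentSource source;
  Op::FragmentTensor tensor;

  accum.fill(2.0f);
  source.fill(half_t(1.0f));
  tensor.fill(half_t(0.0f));   // extra tensor operand; not used by the linear combination itself

  Op::FragmentCompute d = op(accum, source, tensor);   // D = alpha * Accum + beta * C, still float
  (void)d;
}

} // namespace example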
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_with_elementwise.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_with_elementwise.h",
"repo_id": "include",
"token_count": 2835
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with reduction over each column
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands
typename ElementVector_, ///< Pointer to reduction vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename ReductionOp_, ///< Reduction operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value)
>
class EpilogueWithReduction :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using ReductionOp = ReductionOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
static bool const kIsSingleSource = true;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used in reduction
using ReductionFragment = Array<
ElementAccumulator,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
/// Used for the reduction
struct ReductionDetail {
/// If true, accumulator coordinates are computed and out-of-bounds checks are enabled when
/// performing the reduction.
static bool const kOobCheck = false;
/// Number of threads per warp
static int const kWarpSize = 32;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of rows of partial results in the compacted shared-memory tile that must be combined during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
/// Number of accesses each thread makes to cover all Shape::kN columns of the reduction tile during the final phase
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("ReductionDetail {\n");
printf(
" kElementsPerAccess:%d\nkColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kElementsPerAccess,
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
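// Worked example of the constants above (the numbers are illustrative assumptions, not values
// implied by this header). Suppose Shape::kN = 128, ThreadMap::Iterations::kColumn = 1,
// ThreadMap::kElementsPerAccess = 8, and WarpCount::kCount = 4. Then:
//
//   kColumnsPerThread     = 1 * 8                    = 8
//   kThreadCount          = 32 * 4                   = 128
//   kThreadsPerRow        = 128 / 8                  = 16
//   kThreadRows           = 128 / 16                 = 8
//   kThreadAccessesPerRow = max(1, (128 + 127)/128)  = 1
//   StorageShape          = 8 x 128 accumulator elements of shared memory for the reduction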
/// Shared storage structure (shadows base) with additional SMEM buffer for reduction
struct SharedStorage {
union {
BaseSharedStorage base;
AlignedArray<ElementAccumulator, ReductionDetail::StorageShape::kCount, 16> reduction; ///< Shared storage for reduction
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Shared memory pointer for reduction
ElementAccumulator *reduction_ptr_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithReduction(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
reduction_ptr_(shared_storage.reduction.data()),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector * reduction_output_ptr, ///< Reduction output vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
ReductionFragment reduction_fragment;
reduction_fragment.clear();
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
reduction_fragment,
destination_iterator,
accumulators,
tensor_iterator,
problem_size,
threadblock_offset);
}
else {
compute_source_needed_(
output_op,
reduction_fragment,
destination_iterator,
accumulators,
source_iterator,
tensor_iterator,
problem_size,
threadblock_offset);
}
if (output_op.participates_in_reduction()) {
reduction_(problem_size, threadblock_offset, reduction_output_ptr, reduction_fragment);
}
}
private:
/// Perform the reduction
CUTLASS_DEVICE
void reduction_(
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset, ///< Threadblock's initial offset within the problem size space
ElementVector * reduction_output_ptr, ///< Reduction output vector
ReductionFragment const & reduction_fragment) {
//
// Store the partially reduced value to SMEM
//
// Guard against uses of the existing SMEM tile
__syncthreads();
using AccessType = AlignedArray<ElementAccumulator, ThreadMap::kElementsPerAccess>;
//
// Determine a compacted thread arrangement to store to SMEM.
//
int const kThreadsPerRow = Shape::kN / (ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess);
MatrixCoord thread_offset(
thread_idx_ / kThreadsPerRow,
(thread_idx_ % kThreadsPerRow) * ThreadMap::kElementsPerAccess);
//
// Each thread store its fragment to a SMEM
//
AccessType *aligned_reduction_ptr = reinterpret_cast<AccessType *>(
&reduction_ptr_[thread_offset.row() * Shape::kN + thread_offset.column()]);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&reduction_fragment);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int col_idx = column * ThreadMap::Delta::kColumn / ThreadMap::kElementsPerAccess;
aligned_reduction_ptr[col_idx] = frag_ptr[column];
}
__syncthreads();
//
// Now, threads are assigned several columns of the output. They fetch over all rows from
// the compacted SMEM tile and perform a reduction.
//
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ReductionDetail::kThreadAccessesPerRow; ++j) {
int column_idx = thread_idx_ + j * ReductionDetail::kThreadCount;
ReductionOp reduction_op;
ElementAccumulator reduction_element = ElementAccumulator();
int output_column_idx = threadblock_offset.column() + column_idx;
if (column_idx < Shape::kN && output_column_idx < problem_size.column()) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kThreadRows; ++row) {
if (row) {
auto frag = reduction_ptr_[row * Shape::kN + column_idx];
reduction_element = reduction_op(reduction_element, frag);
}
else {
reduction_element = reduction_ptr_[column_idx];
}
}
// Store
reduction_output_ptr[column_idx] = ElementVector(reduction_element);
}
}
}
template<class Seq>
struct acc2smem;
template <size_t... Seq>
struct acc2smem<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
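// The push() above turns a runtime subtile index into the compile-time Advance argument of
// helper<>() by expanding the index_sequence: each initializer "(pos == Seq) && (helper<Seq>(...), 0)"
// short-circuits unless pos == Seq, so exactly one helper instantiation runs per call. A reduced
// sketch of the same idiom (the names below are illustrative, not part of this header):
//
//   template <size_t... Seq>
//   static void dispatch(size_t pos) {
//     int dummy[] = {(pos == Seq) && (do_work<Seq>(), 0)...};
//     (void)dummy;
//   }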
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
//
// Iterator over warp-level accumulator fragment
//
typename TensorTileIterator::Fragment tensor_fragment;
tensor_fragment.clear();
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Convert and store fragment
//
tensor_iterator.load(tensor_fragment);
++tensor_iterator;
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
//
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
//
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Compute the output result
//
FragmentCompute compute_fragment;
apply_output_operator_source_not_needed_(
reduction_fragment,
compute_fragment,
output_op,
aligned_accum_fragment[0],
tensor_fragment,
destination_iterator);
//
// Store the final result
//
NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter;
typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
typename TensorTileIterator::Fragment tensor_fragment;
tensor_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_fragment.clear();
source_iterator.load(source_fragment);
++source_iterator;
tensor_iterator.load(tensor_fragment);
++tensor_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Compute the output result
//
FragmentCompute compute_fragment;
apply_output_operator_(
reduction_fragment,
compute_fragment,
output_op,
aligned_accum_fragment[0],
source_fragment,
tensor_fragment,
destination_iterator);
//
// Convert and store the final result
//
NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter;
typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
ReductionFragment &reduction_fragment,
FragmentCompute &compute_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment,
typename TensorTileIterator::Fragment const &tensor_fragment,
OutputTileIterator const & destination_iterator) {
ComputeAccessType *compute_frag_ptr =
reinterpret_cast<ComputeAccessType *>(&compute_fragment);
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
TensorAccessType const *tensor_frag_ptr =
reinterpret_cast<TensorAccessType const *>(&tensor_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
compute_frag_ptr[i] = output_op(accum_frag_ptr[i], source_frag_ptr[i], tensor_frag_ptr[i]);
}
//
// Partial reduction over each column
//
ReductionOp reduction_op;
typename OutputTileIterator::Mask mask;
destination_iterator.get_mask(mask);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) {
int column_vector_idx = column / ThreadMap::kElementsPerAccess;
bool column_guard = mask.predicates[column_vector_idx];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) {
bool fetch;
if (ReductionDetail::kOobCheck) {
int row_idx = (row % ThreadMap::Iterations::kRow);
int residual = (row / ThreadMap::Iterations::kRow);
int group_idx = (residual % ThreadMap::Iterations::kGroup);
residual = (residual / ThreadMap::Iterations::kGroup);
int cluster_idx = (residual % ThreadMap::Iterations::kCluster);
int row_offset = row_idx * ThreadMap::Delta::kRow
+ group_idx * ThreadMap::Delta::kGroup
+ cluster_idx * ThreadMap::Delta::kCluster;
int output_row = destination_iterator.thread_start_row() + row_offset;
fetch = (output_row < destination_iterator.extent_row() && column_guard);
}
else {
fetch = true;
}
ElementCompute value = ElementCompute();
if (fetch) {
value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column];
}
reduction_fragment[column] = reduction_op(
reduction_fragment[column],
value);
}
}
}
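// Note on the partial reduction above: each thread folds its kRowsPerThread output values for a
// given column into a single entry of reduction_fragment (indexing compute_fragment as
// row * kColumnsPerThread + column), so after the tile loop every thread holds one partial result
// per column it touched. The cross-thread combination happens later in reduction_().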
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
ReductionFragment &reduction_fragment,
FragmentCompute &compute_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename TensorTileIterator::Fragment const &tensor_fragment,
OutputTileIterator const & destination_iterator
) {
ComputeAccessType *compute_frag_ptr =
reinterpret_cast<ComputeAccessType *>(&compute_fragment);
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
TensorAccessType const *tensor_frag_ptr =
reinterpret_cast<TensorAccessType const *>(&tensor_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
compute_frag_ptr[i] = output_op(accum_frag_ptr[i], tensor_frag_ptr[i]);
}
//
// Partial reduction over each column
//
ReductionOp reduction_op;
typename OutputTileIterator::Mask mask;
destination_iterator.get_mask(mask);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) {
int column_vector_idx = column / ThreadMap::kElementsPerAccess;
bool column_guard = mask.predicates[column_vector_idx];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) {
bool fetch;
if (ReductionDetail::kOobCheck) {
int row_idx = (row % ThreadMap::Iterations::kRow);
int residual = (row / ThreadMap::Iterations::kRow);
int group_idx = (residual % ThreadMap::Iterations::kGroup);
residual = (residual / ThreadMap::Iterations::kGroup);
int cluster_idx = (residual % ThreadMap::Iterations::kCluster);
int row_offset = row_idx * ThreadMap::Delta::kRow
+ group_idx * ThreadMap::Delta::kGroup
+ cluster_idx * ThreadMap::Delta::kCluster;
int output_row = destination_iterator.thread_start_row() + row_offset;
fetch = (output_row < destination_iterator.extent_row() && column_guard);
}
else {
fetch = true;
}
ElementCompute value = ElementCompute();
if (fetch) {
value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column];
}
reduction_fragment[column] = reduction_op(
reduction_fragment[column],
value);
}
}
}
};
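// In outline, the epilogue above proceeds as follows for each output tile iteration:
//   1. Warp-level accumulator fragments are staged to shared memory (acc2smem / warp_tile_iterator_).
//   2. shared_load_iterator_ reloads them in the output-tile arrangement, summing across
//      k-partitions when kPartitionsK > 1.
//   3. The output operator combines accumulators with the optional source and the additional
//      tensor operand into a FragmentCompute, while per-column partials are folded into the
//      reduction fragment.
//   4. The result is converted to ElementOutput and written through the destination iterator.
// After the tile loop, reduction_() combines the per-thread partials through shared memory and
// writes the final per-column vector to the reduction output pointer.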
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_with_reduction.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_with_reduction.h",
"repo_id": "include",
"token_count": 10808
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/permute.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIteratorConv | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
bool ScatterD = false, ///< Scatter D operand or not
typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not
bool UseCUDAStore = false,
int Rank = 4
>
class PredicatedTileIteratorConv {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
static int const kRank = Rank;
using Layout = typename platform::conditional<kRank == 4,
layout::TensorNHWC,
layout::TensorNDHWC>::type;
using Stride = typename Layout::Stride;
static int const kStrideRank = Layout::kStrideRank;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using MappedLayout = layout::RowMajor;
using Index = typename MappedLayout::Index;
using LongIndex = typename MappedLayout::LongIndex;
using TensorCoord = typename MappedLayout::TensorCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static bool constexpr PermuteD = !layout::is_trivial_permute<PermuteDLayout>;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
/// Fast divmod objects divided by tensor extents
FastDivmod divmod[kStrideRank - 1];
Stride tensor_stride;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout, conv::Conv2dProblemSize const &problem_size):
PredicatedTileIteratorParams(
layout.stride()[0] * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
) {
divmod[0] = FastDivmod(problem_size.Q);
divmod[1] = FastDivmod(problem_size.P);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
tensor_stride[i] = layout.stride()[i];
}
}
CUTLASS_HOST_DEVICE
Params(Layout const &layout, conv::Conv3dProblemSize const &problem_size):
PredicatedTileIteratorParams(
layout.stride()[0] * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
) {
divmod[0] = FastDivmod(problem_size.Q);
divmod[1] = FastDivmod(problem_size.P);
divmod[2] = FastDivmod(problem_size.Z);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
tensor_stride[i] = layout.stride()[i];
}
}
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
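  // Illustrative host-side construction of Params (sketch only, not part of the original source;
  // the extents and variable names below are assumptions for the example):
  //
  //   cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed({N, P, Q, K});
  //   cutlass::conv::Conv2dProblemSize problem_size(/* N, H, W, C, K, R, S, padding, strides, ... */);
  //   Params params(layout, problem_size);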
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
  /// Byte-level pointer. This pointer is used for both load() and store() unless PermuteD is enabled; with PermuteD, byte_pointer_ is used only for load().
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
  /// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorConv(
Params const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_column_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
// Initialize byte_pointer_
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
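        // Decompose the linear output row index (which enumerates the (N, P, Q) positions) into its
        // spatial coordinates using the precomputed FastDivmod objects, then compute the memory
        // offset as the dot product with the tensor strides.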
Stride tensor_coord = CoordinateDecompositionLittleEndian<kStrideRank>(row_offset + thread_start_row_, params_.divmod);
LongIndex tensor_offset = dot(tensor_coord, params_.tensor_stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess + tensor_offset / kElementsPerAccess],
guard);
}
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
Stride tensor_coord = CoordinateDecompositionLittleEndian<kStrideRank>((row_offset + thread_start_row_), params_.divmod);
LongIndex tensor_offset = dot(tensor_coord, params_.tensor_stride);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
if (UseCUDAStore) {
if (guard) {
memory_pointer[tensor_offset / kElementsPerAccess] =
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column];
}
} else {
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)&memory_pointer[tensor_offset / kElementsPerAccess],
guard);
}
memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess);
}
}
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
  /// Need to get the thread start column from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorConv &operator++() {
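    // state_[0], state_[1] and state_[2] track the current row, group and cluster iteration.
    // Each time one level wraps around, the next level advances and thread_start_row_ is bumped
    // past the rows already covered by the completed level.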
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow
* ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile;
}
}
}
return *this;
}
/// Advances a number of positions to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorConv &operator+=(int increment)
{
// Row
state_[0] += increment;
int increment_row = state_[0] / ThreadMap::Count::kRow;
state_[0] = state_[0] % ThreadMap::Count::kRow;
thread_start_row_ += (ThreadMap::Shape::kRow * increment);
// Group
state_[1] += increment_row;
int increment_group = state_[1] / ThreadMap::Count::kGroup;
state_[1] = state_[1] % ThreadMap::Count::kGroup;
thread_start_row_ +=
(ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow *
ThreadMap::Count::kRow *
increment_row;
// Cluster
state_[2] += increment_group;
int increment_cluster = state_[2] / ThreadMap::Count::kCluster;
state_[2] = state_[2] % ThreadMap::Count::kCluster;
thread_start_row_ +=
ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup *
ThreadMap::Count::kRow *
ThreadMap::Shape::kRow *
increment_group;
// Tile
thread_start_row_ +=
ThreadMap::Shape::kGroup *
ThreadMap::Shape::kRow *
ThreadMap::Shape::kCluster *
ThreadMap::Shape::kTile *
increment_cluster;
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
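// Illustrative epilogue-side usage of PredicatedTileIteratorConv (sketch only; the surrounding
// epilogue, pointer and offset variables are assumptions, not part of this header):
//
//   PredicatedTileIteratorConv<ThreadMap, Element> iterator(params, ptr_D, extent, thread_idx, tb_offset);
//   PredicatedTileIteratorConv<ThreadMap, Element>::Fragment fragment;
//   iterator.load(fragment);      // read the source tile (guarded by predicates)
//   /* apply the output operator to 'fragment' */
//   iterator.store(fragment);     // write the result tile
//   ++iterator;                   // advance to the next output tile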
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h",
"repo_id": "include",
"token_count": 6619
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/simt_policy.h"
#define CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES 1
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Element, ///< data type of element to be written
typename Layout, ///< target shared memory layout
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimt<WarpShape_, Operator_, Element_, layout::RowMajor, MmaSimtPolicy_> {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
+ 1
#endif
>;
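  // Note: the extra column of padding in the scalar-store path is presumably there to stagger
  // rows across shared-memory banks and reduce bank conflicts; this rationale is inferred and
  // not documented in the original source.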
private:
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
1
>;
#else
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
Policy::kElementsPerAccess
>;
#endif
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimt(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimt(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
pointer_ += layout_({
lane_offset.row(),
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimt & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimt & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
(tile_offset.column() * Shape::kColumn / int(AccessType::kElements))
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimt & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
// de-vectorized stores
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag);
ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
scalarPointer[n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s] = scalarFragPtr[n * Policy::kElementsPerAccess + s];
}
}
#else
// original vector stores
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n];
}
#endif
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
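// Illustrative warp-level store of one accumulator fragment (sketch only; variable names are
// assumptions):
//
//   TileIteratorSimt<WarpShape, Operator, Element, layout::RowMajor, MmaSimtPolicy> iter(smem_ref, lane_id);
//   iter.store(accum_fragment);   // each warp writes its tile of accumulators to shared memory
//   __syncthreads();              // synchronize before another iterator reads the tile back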
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtDirectConv {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<Policy::kRowsPerIteration, WarpShape::kN>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<typename Operator::ElementC, Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<typename Operator::ElementC, Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<0,
0
>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
Policy::kElementsPerAccess
>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Base smem offset;
Index base_smem_address_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv() : pointer_(nullptr) {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
pointer_ += layout_({
lane_offset.row(),
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
(tile_offset.column() * Shape::kColumn / int(AccessType::kElements))
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
// original vector stores
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType * load_pointer_ = reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
load_pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address){
base_smem_address_ = address;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_, /// Size of the matrix to load (concept: TensorNHWC)
          typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtDirect2dConv {
public:
using WarpShape = WarpShape_;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using MmaSimtPolicy = MmaSimtPolicy_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
// Thread-level shape of a fragment
using ThreadShape = MatrixShape<ThreadOutputShape::kNHW, ThreadOutputShape::kC>;
static_assert(!(ThreadShape::kColumn % MmaSimtPolicy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW>;
using Iterations =
MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / MmaSimtPolicy::LaneMmaShape::kN>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Operator::FragmentC;
/// This is the fragment size produced by one access of the iterator.
using Fragment = AccumulatorTile;
/// Padding quantity
using Padding = MatrixShape<0, 0>;
private:
// Storage type for accessing memory
using AccessType = AlignedArray<Element, MmaSimtPolicy::LaneMmaShape::kN>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Base smem offset;
Index base_smem_address_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv() : pointer_(nullptr) {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv(TensorRef const &ref, unsigned thread_id, unsigned lane_id)
: pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
// Get base HW offset of current threads
const int threadgroup = thread_id / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
const int base_p = (threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH;
const int base_q = (threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW;
const int row_offset = base_p * ThreadBlockOutputShape::kW + base_q;
pointer_ += layout_(
{row_offset,
lane_offset.column() * MmaSimtPolicy::LaneMmaShape::kN / int(AccessType::kElements)});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv &add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType *storer_pointer_ =
reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int h = 0; h < ThreadOutputShape::kH; ++h) {
CUTLASS_PRAGMA_UNROLL
for (int w = 0; w < ThreadOutputShape::kW; ++w) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int offset = (w + h * ThreadBlockOutputShape::kW) *
(ThreadBlockOutputShape::kC / AccessType::kElements) +
col;
storer_pointer_[offset + pointer_offset / int(AccessType::kElements)] =
frag_ptr[w + h * ThreadOutputShape::kW + col];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) { base_smem_address_ = address; }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtCanonical {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = Layout_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess + 1
>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
1
>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Guard to indicate whether the shape is divisible
bool divisible_;
/// Extent of the output tensor
MatrixCoord extent_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements),
divisible_(true),
extent_(WarpShape::kM, WarpShape::kN) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
thread_offset_ = {
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess
};
pointer_ += layout_({
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(
TensorRef const &ref,
TensorCoord const &extent,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements),
divisible_(false),
extent_(extent) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
thread_offset_ = {
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess
};
pointer_ += layout_({
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row(),
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column()
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
// de-vectorized stores
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag);
ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s;
int frag_idx = n * Policy::kElementsPerAccess + s;
int col = thread_offset_.column() + ptr_idx;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
scalarPointer[ptr_idx] = scalarFragPtr[frag_idx];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
// de-vectorized loads
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType *scalarFragPtr = reinterpret_cast<ScalarAccessType *>(&frag);
ScalarAccessType const *scalarPointer = reinterpret_cast<ScalarAccessType const*>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s;
int frag_idx = n * Policy::kElementsPerAccess + s;
int col = thread_offset_.column() + ptr_idx;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
scalarFragPtr[frag_idx] = scalarPointer[ptr_idx];
}
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tile_iterator_simt.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_simt.h",
"repo_id": "include",
"token_count": 9058
} | 34 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/tensor_predicate.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm70TwoStageUnpredicated,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm70TwoStageUnpredicated;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}))));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}))));
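  // SmemLayoutA/B tile the layout atoms over the full (BLK_M,BLK_K) and (BLK_N,BLK_K) CTA tiles.
  // The two-stage mainloop keeps a single shared-memory buffer per operand, so these layouts have
  // no pipeline (stage) mode.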
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
(void)residue_mnk;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
static_assert(cute::rank(SmemLayoutB{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
    Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K)
    Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K)
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto copy_a_thr = gmem_tiled_copy_a.get_slice(thread_idx);
auto copy_b_thr = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = copy_a_thr.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = copy_a_thr.partition_D(sA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBgB = copy_b_thr.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = copy_b_thr.partition_D(sB); // (BCPY,BCPY_N,BCPY_K)
// Allocate the register tiles for double buffering -- same shape as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (BCPY,BCPY_N,BCPY_K)
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA); // (MMA,MMA_M,MMA_K)
    Tensor tCrB = thr_mma.partition_fragment_B(sB); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto thr_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsA = thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
auto thr_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsB = thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
//
// Prologue
//
// Copy gmem to rmem for the first k_tile
copy(gmem_tiled_copy_a, tAgA(_,_,_,*k_tile_iter), tArA);
copy(gmem_tiled_copy_b, tBgB(_,_,_,*k_tile_iter), tBrB);
if (--k_tile_count > 0) ++k_tile_iter;
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
    // Synchronize so the shared memory tiles written above are visible to all threads
    __syncthreads();
// Load A, B smem->rmem for k=0
copy(tCsA(_,_,0), tCrA_copy_view(_,_,0));
copy(tCsB(_,_,0), tCrB_copy_view(_,_,0));
//
// Mainloop
//
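    // Two-stage software pipeline: while the MMA consumes the k-block currently resident in
    // shared memory, the next k-tile is staged from global memory into registers (tArA/tBrB);
    // on the last k-block those registers are committed to shared memory for the next iteration.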
    // Size of the k-tile's outer product mode (k)
auto K_BLOCK_MAX = size<2>(tCrA);
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -1)
{
// Pipeline the outer products with a static for loop
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
__syncthreads();
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Load A, B smem->rmem for k+1
int k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
if (k_block == 0)
{
// Copy gmem to rmem
copy(gmem_tiled_copy_a, tAgA(_,_,_,*k_tile_iter), tArA);
copy(gmem_tiled_copy_b, tBgB(_,_,_,*k_tile_iter), tBrB);
if (--k_tile_count > 0) ++k_tile_iter;
}
// transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k
// disambiguate gemm (shared with the namespace name)
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm70TwoStage,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm70TwoStage;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}))));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}))));
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
static_assert(cute::rank(SmemLayoutB{}) == 2,
"MainloopTwoStage must not have a smem shape with a pipeline mode.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
    Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K)
    Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K)
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
gA.data() = &gA(0, get<2>(residue_mnk), 0);
gB.data() = &gB(0, get<2>(residue_mnk), 0);
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx);
auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx);
    Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
    Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K)
    Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
    Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K)
// Allocate the register tiles for double buffering -- same shape as partitioned data
Tensor tArA = make_fragment_like(tAsA); // (ACPY,ACPY_M,ACPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (BCPY,BCPY_N,BCPY_K)
//
// PREDICATES
//
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
//
// PREFETCH
//
// Clear the rmem tiles to account for predicated off loads
clear(tArA);
clear(tBrB);
    // Start the global->register loads for the 0th k-tile, where we take care of the k residue
{
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tArA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tArA(_,_,k));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBrB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBrB(_,_,k));
}
}
++k_tile_iter;
--k_tile_count;
}
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.make_fragment_A(thr_mma.partition_A(sA)); // (MMA,MMA_M,MMA_K)
    Tensor tCrB = thr_mma.make_fragment_B(thr_mma.partition_B(sB)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto thr_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsA = thr_copy_A.partition_S(sA);
Tensor tCrA_copy_view = thr_copy_A.retile_D(tCrA);
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
auto thr_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma).get_thread_slice(thread_idx);
Tensor tCsB = thr_copy_B.partition_S(sB);
Tensor tCrB_copy_view = thr_copy_B.retile_D(tCrB);
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
//
// Prologue
//
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
    // Synchronize so the shared memory tiles written above are visible to all threads
    __syncthreads();
// Load A, B smem->rmem for k=0
copy(tCsA(_,_,0), tCrA_copy_view(_,_,0));
copy(tCsB(_,_,0), tCrB_copy_view(_,_,0));
//
// Mainloop
//
    // Size of the k-tile's outer product mode (k)
auto K_BLOCK_MAX = size<2>(tCrA);
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -1)
{
// Pipeline the outer products with a static for loop
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
__syncthreads();
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Load A, B smem->rmem for k+1
int k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
if (k_block == 0)
{
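          // Once the final k-tile has been consumed, clear the predicates so the trailing
          // prefetch below is fully predicated off and reads nothing out of bounds.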
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tArA);
copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBrB);
++k_tile_iter;
--k_tile_count;
}
// transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k
// disambiguate gemm (shared with the namespace name)
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm70_mma_twostage.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm70_mma_twostage.hpp",
"repo_id": "include",
"token_count": 10191
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level RankK definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/rank_k_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultRankKComplex;
////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
> struct RankKTransposedComplexTransform {
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformA;
};
// partial specializations for HERK CUBLAS_OP_N layout (ColumnMajor)
template <>
struct RankKTransposedComplexTransform <
layout::ColumnMajor,
ComplexTransform::kNone,
BlasMode::kHermitian> {
static ComplexTransform const kTransformA = ComplexTransform::kConjugate;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
};
// partial specializations for HERK CUBLAS_OP_C layout (RowMajor + Complex conjugate)
template <>
struct RankKTransposedComplexTransform <
layout::RowMajor,
ComplexTransform::kConjugate,
BlasMode::kHermitian> {
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kConjugate;
};
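// Illustrative sanity checks added for exposition (not part of the original header):
// for HERK with a ColumnMajor A and no conjugation requested on the input
// (CUBLAS_OP_N), the specialization above places the conjugation on operand A and
// leaves the transposed operand B unconjugated.
static_assert(RankKTransposedComplexTransform<
                  layout::ColumnMajor, ComplexTransform::kNone, BlasMode::kHermitian
              >::kTransformA == ComplexTransform::kConjugate,
              "HERK (CUBLAS_OP_N): conjugation is carried by operand A");
static_assert(RankKTransposedComplexTransform<
                  layout::ColumnMajor, ComplexTransform::kNone, BlasMode::kHermitian
              >::kTransformB == ComplexTransform::kNone,
              "HERK (CUBLAS_OP_N): the transposed operand stays unconjugated");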
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
  /// Define the threadblock-scoped matrix multiply-accumulate (A x A^T)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformA, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
  // Complex transform for input A and B matrices (function of input layout)
static ComplexTransform const kTransformA = TransformA;
using TransposedComplexTransform = detail::RankKTransposedComplexTransform<
LayoutA,
TransformA,
kBlasMode>;
// Complex transform on operandA and operandB (function of blas3 computation)
static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
/// Define the threadblock-scoped matrix multiply-accumulate (A x A^H)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
  /// Define the threadblock-scoped matrix multiply-accumulate (A x A^T)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformA, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRankKComplex<
ElementA, LayoutA, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
  // Complex transform for input A and B matrices (function of input layout)
static ComplexTransform const kTransformA = TransformA;
using TransposedComplexTransform = detail::RankKTransposedComplexTransform<
LayoutA,
TransformA,
kBlasMode>;
// Complex transform on operandA and operandB (function of blas3 computation)
static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
/// Define the threadblock-scoped matrix multiply-accumulate (A x A^H)
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
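// Usage sketch (illustrative only; the tile shapes, epilogue, and swizzle below are
// assumptions chosen for exposition, not defaults provided by this header). An SM80
// HERK kernel for complex<float> could be composed roughly as follows:
//
//   using DefaultHerk = cutlass::gemm::kernel::DefaultRankKComplex<
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,        // A
//       cutlass::complex<float>, cutlass::layout::RowMajor,           // C/D
//       cutlass::FillMode::kLower,
//       cutlass::complex<float>,                                      // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64, 64, 16>,                         // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,                         // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,                           // instruction
//       cutlass::epilogue::thread::LinearCombination<
//           cutlass::complex<float>, 1,
//           cutlass::complex<float>, cutlass::complex<float>>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                            // stages
//       cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex,
//       false,                                                        // SplitKSerial
//       cutlass::BlasMode::kHermitian>;
//
//   using RankKKernel = DefaultHerk::RankKkernel;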
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/default_rank_k_complex.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_rank_k_complex.h",
"repo_id": "include",
"token_count": 5386
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail
{
template<typename ElementAlphaBeta, bool BetaIsZero>
struct GemvBatchedStridedEpilogueScaling
{
ElementAlphaBeta const & alpha;
ElementAlphaBeta const & beta;
CUTLASS_DEVICE
GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) :
alpha(alpha_), beta(beta_)
{ }
template<typename FragmentCD, typename FragmentAccumulator>
CUTLASS_DEVICE
void operator()(FragmentAccumulator& accumulators,
FragmentCD const& fragment_C,
FragmentCD& fragment_D) const
{
using AccType = typename FragmentAccumulator::value_type;
using CDType = typename FragmentCD::value_type;
static_assert(FragmentCD::kElements == FragmentAccumulator::kElements,
"Mistmatch in fragment sizes.");
for (int i = 0; i < FragmentCD::kElements; ++i)
{
if (BetaIsZero)
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha));
}
else
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha)
+ AccType(fragment_C[i]) * AccType(beta));
}
}
}
};
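// The functor above applies the standard BLAS scaling elementwise,
// D[i] = alpha * accum[i] (+ beta * C[i] unless BetaIsZero), forming the products in
// the accumulator type before narrowing to the C/D element type.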
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero=false>
CUTLASS_DEVICE void GemvBatchedStridedDevice(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling<ElementAlphaBeta, BetaIsZero>;
ThreadBlockSwizzle swizzler;
// Compute initial location in logical coordinates
BatchedGemmCoord tb_offset = swizzler.get_tile_offset();
int const batch_idx = swizzler.get_batch_idx();
// Offset to the batch
ref_A.add_pointer_offset(batch_idx*lda);
ref_B.add_pointer_offset(batch_idx*ldb);
// Construct iterators to A and B operands
typename GemvKernel::IteratorA::Params params_A(ref_A.layout());
typename GemvKernel::IteratorA iterator_A(
params_A,
ref_A.data(),
{ 1, problem_size.k() },
0,
{ 0, 0 });
typename GemvKernel::IteratorB::Params params_B(ref_B.layout());
typename GemvKernel::IteratorB iterator_B(
params_B,
ref_B.data(),
{ problem_size.k(), problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
//
// Main loop
//
// Construct thread-scoped matrix multiply
ThreadBlockGemv mma;
typename ThreadBlockGemv::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped gemv
mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
typename GemvKernel::FragmentCD fragment_CD;
// Load C (skip if beta is zero)
if (!BetaIsZero)
{
tb_offset = swizzler.get_tile_offset();
ref_C.add_pointer_offset(batch_idx*ldc);
typename GemvKernel::IteratorCD::Params params_C(ref_C.layout());
typename GemvKernel::IteratorCD iterator_C(
params_C,
ref_C.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_C.load(fragment_CD);
}
// Apply alpha/beta scaling
EpilogueScale epilogue_scale(alpha, beta);
epilogue_scale(accumulators, fragment_CD, fragment_CD);
// Store D
tb_offset = swizzler.get_tile_offset();
ref_D.add_pointer_offset(batch_idx*ldd);
typename GemvKernel::IteratorCD::Params params_D(ref_D.layout());
typename GemvKernel::IteratorCD iterator_D(
params_D,
ref_D.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_D.store(fragment_CD);
}
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, BetaIsZero>(
problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd
);
}
template <typename GemvKernel, typename ElementAlphaBeta>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
template <typename GemvKernel>
CUTLASS_GLOBAL void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element;
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
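// Usage sketch (illustrative only; `MyGemvKernel`, the launch geometry, and the leading
// dimensions are assumptions, not part of this header). Given a GemvKernel type composed
// elsewhere, the alpha-only batched-strided kernel can be launched directly:
//
//   cutlass::gemm::BatchedGemmCoord problem(1, n, k, batch_count);
//   dim3 block(/* thread count required by MyGemvKernel's thread map */);
//   dim3 grid(/* tiles of ThreadBlockGemv::Shape::kN covering n, with batches mapped
//                by the kernel's ThreadBlockSwizzle */);
//   cutlass::gemm::kernel::GemvBatchedStrided<MyGemvKernel, float>
//       <<<grid, block>>>(problem, alpha, ref_A, lda, ref_B, ldb, ref_D, ldd);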
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/gemv_batched_strided.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemv_batched_strided.h",
"repo_id": "include",
"token_count": 3192
} | 37 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelCpAsyncWarpSpecializedCooperative, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
using TileSchedulerParams = typename TileScheduler::Params;
using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA;
using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB;
static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same");
static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumMmaWarpGroups = cute::size(TiledMma{}) / NumThreadsPerWarpGroup;
static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups;
static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance.");
static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup;
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
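  // Worked example (illustrative, not prescriptive): with 128 threads in the A/B tiled
  // copies (NumLoadWarpGroups == 1) and a TiledMma spanning 256 threads
  // (NumMmaWarpGroups == 2), NumWarpGroups == 3 and MaxThreadsPerBlock == 384: one
  // producer warp group issues cp.async loads while two consumer warp groups cooperate
  // on the math and epilogue for each output tile.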
// Kernel level shared memory storage
struct SharedStorage {
struct TensorStorage : cute::aligned_struct<128> {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
} pipelines;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerParams scheduler{};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
CUTLASS_TRACE_HOST("to_underlying_arguments():");
auto problem_shape = args.problem_shape;
if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) {
// swap M/N
get<0>(problem_shape) = get<1>(args.problem_shape);
get<1>(problem_shape) = get<0>(args.problem_shape);
}
auto problem_shape_MNKL = append<4>(problem_shape, 1);
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count);
KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count};
TileSchedulerParams scheduler = TileScheduler::to_underlying_arguments(
problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, workspace);
return {
args.mode,
problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace),
hw_info,
scheduler
};
}
CUTLASS_HOST_DEVICE static
bool
can_implement(Arguments const& args) {
bool implementable = (args.mode == GemmUniversalMode::kGemm) or
(args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4);
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static
size_t
get_workspace_size(Arguments const& args) {
TileScheduler t;
return t.template get_workspace_size<ProblemShape, ElementAccumulator>(
args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups);
}
static
cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
TileScheduler t;
return t.template initialize_workspace<ProblemShape, ElementAccumulator>(
args.scheduler, workspace, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups);
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
// Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently
TileSchedulerArguments args{};
if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) {
args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_;
}
return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args);
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
/* In the Cooperative kernel, one or multiple Consumers collaborate on the same tile */
enum class WarpGroupRole {
Producer = 0,
Consumer = 1,
};
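    // Illustrative mapping of the role assignment below: assuming NumLoadWarpGroups == 1
    // and NumMmaWarpGroups == 2, warp group 0 (threads 0-127) becomes the Producer and
    // warp groups 1-2 (threads 128-383) become Consumers.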
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int mma_thread_idx = thread_idx % size(TiledMma{});
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int warp_group_idx = canonical_warp_group_idx();
CUTLASS_ASSERT(warp_group_idx < NumWarpGroups);
WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer;
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
mainloop_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params);
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup;
epi_load_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup;
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
// Separate out problem shape for convenience
    // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
auto M = get<0>(problem_shape_MNKL);
auto N = get<1>(problem_shape_MNKL);
auto K = get<2>(problem_shape_MNKL);
auto L = get<3>(problem_shape_MNKL);
// Represent the full tensors
Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l)
Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l)
// Get the appropriate blocks for this thread block -- potential for thread block locality
TiledMma tiled_mma;
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
// Make tiled views, defer the slice
Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l)
Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l)
TileScheduler scheduler{params.scheduler};
auto work_tile_info = scheduler.get_current_work();
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue};
// Wait for all threads in the thread block
__syncthreads();
if (warp_group_role == WarpGroupRole::Producer) {
while (work_tile_info.is_valid()) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
// Slice with our work tile coordinates to construct mainloop tensor views
Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k)
Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k)
// Get the number of K tiles to compute for this work as well as the starting K tile offset of the work.
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info);
auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<2>(gA)), shape<2>(gA));
// Compute tile residues for predication
auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord
auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord
auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue);
collective_mainloop.load(
mainloop_pipeline,
mainloop_pipe_producer_state,
gA,
gB,
k_tile_iter, work_k_tile_count,
residue_mnk,
thread_idx,
shared_storage.tensors.mainloop
);
// Update starting pipeline state for the next tile
mainloop_pipe_producer_state.advance(work_k_tile_count);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler) &&
collective_epilogue.is_producer_load_needed()) {
epi_load_pipe_producer_state =
collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
warp_group_thread_idx,
shared_storage.tensors.epilogue
);
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
if (collective_epilogue.is_producer_load_needed()) {
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
}
} // Producer Warp Group End
else if (warp_group_role == WarpGroupRole::Consumer) {
bool do_store_tail = false;
while (work_tile_info.is_valid()) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
        // Allocate the accumulators for the (M,N) blk_shape
//
// MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead.
auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
work_k_tile_count,
mma_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
work_k_tile_count
);
// Update starting mainloop pipeline state for the next tile
mainloop_pipe_consumer_state.advance(work_k_tile_count);
// Index of warp group within consumer warp groups
int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups;
// Perform reduction across splits, if needed
TileScheduler::fixup(
params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
// Epilogue and write to gD
auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] =
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
mma_thread_idx,
shared_storage.tensors.epilogue
);
epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next;
epi_store_pipe_producer_state = epi_store_pipe_producer_state_next;
do_store_tail = true;
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
if (do_store_tail) {
collective_epilogue.store_tail(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state
);
}
} // Consumer Warp Groups End
#endif
}
private:
// Kernel helper function to get next work unit
CUTLASS_DEVICE
typename TileScheduler::WorkTileInfo
fetch_next_work(
typename TileScheduler::WorkTileInfo& work_tile_info,
TileScheduler& scheduler) const {
// Check whether we should continue on with the current work unit. If this is the case,
// the work unit will have been updated in continue_current_work to reflect the new
// tile to be computed.
if (scheduler.continue_current_work(work_tile_info)) {
return work_tile_info;
}
// Get next work tile
scheduler.advance_to_next_work();
return scheduler.get_current_work();
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp",
"repo_id": "include",
"token_count": 8859
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
bool> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::RowMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::ColumnMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
/// Use 1x1x4 IDP4A sequence for bulk of computation
ArchMmaOperator mma;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m * Shape::kK / ArchMmaOperator::Shape::kK + k],
ptr_B[n * Shape::kK / ArchMmaOperator::Shape::kK + k],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
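// Worked example (illustrative): for a thread-level Shape of gemm::GemmShape<4, 4, 16>,
// FragmentA and FragmentB each hold 64 int8_t values and the loops above issue
// (16/4) * 4 * 4 = 64 IDP4A operations, each accumulating a dot product of four int8_t
// pairs into one int32_t element of the 4x4 accumulator tile.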
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::ColumnMajor,
int8_t,
layout::RowMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
int8_t> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::ColumnMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::RowMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
/// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
/// Underlying matrix multiply operator
ArchMmaOperator mma;
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m + k * Shape::kM],
ptr_B[n + k * Shape::kN],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/thread/mma_sm61.h/0 | {
"file_path": "include/cutlass/gemm/thread/mma_sm61.h",
"repo_id": "include",
"token_count": 2984
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
///
/// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class MmaPipelined : public MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // Statically assert kStages for MmaPipelined is two (double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
protected:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
///< transformation applied to A fragment
TransformA transform_A_;
///< transformation applied to B fragment
TransformB transform_B_;
/// Shared memory write stage index
int smem_write_stage_idx;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx, ///< ID of each thread within a warp
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB() ///< transformation applied to B fragment
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
transform_A_(transform_A),
transform_B_(transform_B),
smem_write_stage_idx(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Advance shared memory write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_write_stage()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
smem_write_stage_idx ^= 1;
}
/// Advance shared memory read- and write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_stages()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
// wrap write stage
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else
{
// wrap read stage
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
smem_write_stage_idx ^= 1;
}
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// The last k-block is loaded in the prologue
// Load A fragment from global A
FragmentA tb_frag_A;
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load B fragment from global B
FragmentB tb_frag_B;
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Store A and B fragments to shared
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Advance write stage
advance_smem_write_stage();
}
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
__syncthreads();
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
// Load A fragment from shared A
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
++this->warp_tile_iterator_A_;
// Load B fragment from shared B
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_B_;
// Pair of fragments used to overlap global memory loads and math instructions
FragmentA tb_frag_A;
FragmentB tb_frag_B;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations <= 1);
iterator_B.clear_mask(gemm_k_iterations <= 1);
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to the k offset if this is the last group.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Advance smem read and write stages
advance_smem_stages();
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
// Load fragment from global A
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load fragment from global B
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations <= 2);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
warp_mma(
accum,
warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2],
accum);
}
}
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
// First, increment the remaining warp tiles to catch them up with the write stage.
#pragma unroll
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
// If we bumped the read iterators to the end of the circular buffer, wrap them around to
// align them with the write iterators
if (smem_write_stage_idx == 0)
{
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum) ///< source accumulator tile
{
// Prologue
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
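//
// Usage sketch (illustrative only): the comment below shows how a threadblock-scoped kernel
// typically drives MmaPipelined. 'Mma' stands for a concrete MmaPipelined instantiation and the
// tile iterators are assumed to have been constructed elsewhere; this is a hedged example of the
// operator() contract defined above, not a definitive kernel implementation.
//
//   __shared__ typename Mma::SharedStorage shared_storage;
//
//   Mma mma(shared_storage, thread_idx, warp_idx, lane_idx);
//
//   typename Mma::FragmentC accum;
//   accum.clear();
//
//   // iterator_A / iterator_B iterate over the A and B operands in global memory
//   mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
//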
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_pipelined.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_pipelined.h",
"repo_id": "include",
"token_count": 5846
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Defines a "fragment" iterator for visiting the fragments of a warp tile
  that participate in one warp-level mma operation.
  Typically, this is used to access the accumulator tile/fragment of a warp-level mma operation.
  The accumulator tile is then partitioned into smaller tiles/fragments that can be fed into
  the next warp-level mma operation.
  This iterator is necessary to accomplish warp-level mma fusion, where the accumulator tile is
  reused as the multiplicand tile for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of the accumulation tile shape (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on the fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator;
// Partial specialization for col-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
static int const kResidualIndex = kResidualColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentAccessType = Array<Element, kElementsPerAccess>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] = output_op(accumulators_[accumulator_access_offset]);
}
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment &bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] =
output_op(accumulators_[accumulator_access_offset],
scale_ptr[n] /*scale*/, bias_ptr[n] /*bias*/);
}
}
}
};
// Partial specialization for row-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kRowsPerIteration = 8;
static int const kColumnsPerIteration = 16;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kN / kThreads;
static int const kElementsPerAccess = kRowsPerIteration * kColumnsPerIteration / kThreads;
static int const kIterationsPerAccess = kElementsPerAccess / kElementsPerIteration;
// Number of iterations per actual instruction
static int const kIterationsPerInstruction = InstructionShape::kM / kRowsPerIteration;
static int const kAccessStride = kIterationsPerInstruction;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of Accesses in a warp
using AccessIterations = MatrixShape<MmaIterations::kRow * kIterationsPerInstruction,
MmaIterations::kColumn / kIterationsPerAccess>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn;
static int const kResidualIndex = kResidualColumn / Shape::kColumn;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerIteration>;
using FragmentAccessType = Array<Element, kElementsPerIteration>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerIteration>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(accumulators_[accumulator_access_offset + j * kAccessStride]);
}
index++;
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment & bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
int scale_bias_offset = (index
% (kIterationsPerInstruction * AccessIterations::kColumn))
* kIterationsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(
accumulators_[accumulator_access_offset + j * kAccessStride],
scale_ptr[scale_bias_offset + j], bias_ptr[scale_bias_offset + j]);
}
index++;
}
}
};
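//
// Usage sketch (illustrative only): both specializations above expose the same interface. In the
// loop below, 'FragmentIterator' stands for a concrete MmaTensorOpFragmentIterator instantiation,
// 'accumulator_tile' is its AccumulatorFragment, and 'output_op' is an instance of the OutputOp
// functor (which must not require a source operand, as asserted in load()). This is a hedged
// example of the iterator contract, not code taken from an existing CUTLASS kernel.
//
//   FragmentIterator frag_iterator(accumulator_tile);
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int iter = 0; iter < FragmentIterator::Policy::kIterations; ++iter) {
//     typename FragmentIterator::Fragment operand;
//     frag_iterator.load(operand, output_op);   // applies the elementwise output op while loading
//     ++frag_iterator;
//     // ... feed 'operand' to the next warp-level MMA as a multiplicand ...
//   }
//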
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h",
"repo_id": "include",
"token_count": 7309
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using IEEE half-precision floating-point types in host or
device code.
*/
#pragma once
#ifndef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#endif
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
// F16C extensions are not meaningful when compiling for NVRTC which only accommodates device code.
#undef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/float8.h"
#include "cutlass/platform/platform.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Optionally target F16C extensions to accelerate half-precision conversion.
#if !defined(__CUDA_ARCH__) && (CUTLASS_ENABLE_F16C)
#if defined(_MSC_VER)
#include <immintrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <intrin.h>
#endif
#define F16C_ROUND_NEAREST 0
#if !defined(__CUDA_ARCH__)
extern __inline float _cvtsh_ss (unsigned short __S) {
__m128i packed;
std::memcpy(&packed, &__S, sizeof(__S));
__m128 result = _mm_cvtph_ps(packed);
float flt;
std::memcpy(&flt, &result, sizeof(flt));
return flt;
}
__inline unsigned short _cvtss_sh (float __F, const int) {
__m128 packed;
std::memcpy(&packed, &__F, sizeof(__F));
__m128i result = _mm_cvtps_ph(packed, F16C_ROUND_NEAREST);
unsigned short u;
std::memcpy(&u, &result, sizeof(u));
return u;
}
#endif
#else
// Linux
#include <x86intrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif
#define F16C_ROUND_NEAREST (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC)
#endif // _MSC_VER
class CpuId {
bool f16c_enabled;
CpuId() {
#if defined(__i386__) || defined(__x86_64__)
#if defined(_MSC_VER)
int exx[4];
__cpuid (exx, 1);
f16c_enabled = exx[2] & 0x20000000;
#else
// GCC / Clang
int eax, ebx, ecx, edx;
__cpuid (1 , eax, ebx, ecx, edx);
f16c_enabled = ecx & 0x20000000;
#endif
#else
// Arm / PowerPC etc.
f16c_enabled = false;
#endif
}
public:
bool is_f16c_supported() const {
return f16c_enabled;
}
static const CpuId& instance() {
static CpuId cpu;
return cpu;
}
};
#endif // !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// IEEE half-precision floating-point type
struct alignas(2) half_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Static conversion operators
//
/// Constructs from an unsigned short
CUTLASS_HOST_DEVICE
static half_t bitcast(uint16_t x) {
half_t h;
h.storage = x;
return h;
}
/// FP32 -> FP16 conversion - rounds to nearest even
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static half_t convert(float const& flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__float2half_rn(flt));
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = _cvtss_sh(flt, F16C_ROUND_NEAREST);
return bitcast(u);
}
#endif
// software implementation rounds to nearest even
unsigned s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<unsigned const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int16_t exp = uint16_t(((s >> 23) & 0xff) - 127);
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return bitcast(u);
}
int sticky_bit = 0;
if (exp >= -14) {
// normal fp32 to normal fp16
exp = uint16_t(exp + uint16_t(15));
u = uint16_t(((exp & 0x1f) << 10));
u = uint16_t(u | (mantissa >> 13));
} else {
// normal single-precision to subnormal half_t-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
// round to nearest even
int round_bit = ((mantissa >> 12) & 1);
sticky_bit |= ((mantissa & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint16_t(u + 1);
}
u |= sign;
return bitcast(u);
#endif
}
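  //
  // Note on the software rounding path above: half_t carries an 11-bit significand, so
  // consecutive integers are exactly representable only up to 2048. For example, 2049.0f
  // converts to 2048 (the halfway case rounds to the even significand) while 2051.0f converts
  // to 2052 (that halfway case also rounds toward the even significand, which here means
  // rounding up).
  //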
/// int -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(int const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__int2half_rn(n));
#else
return convert(float(n));
#endif
}
/// unsigned -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(unsigned const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__uint2half_rn(n));
#else
return convert(float(n));
#endif
}
/// Converts a half-precision value stored as a uint16_t to a float
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static float convert(half_t const& x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __half2float(x.to_half());
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = x.storage;
return _cvtsh_ss(u);
}
#endif
uint16_t const &h = x.storage;
uint32_t sign = ((h >> 15) & 1);
uint32_t exp = ((h >> 10) & 0x1f);
uint32_t mantissa = (h & 0x3ff);
unsigned f = 0;
if (exp > 0 && exp < 31) {
// normal
exp += 112;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += 113;
while ((mantissa & (1 << 10)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= 0x3ff;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else {
// sign-preserving zero
f = (sign << 31);
}
} else if (exp == 31) {
if (mantissa) {
f = 0x7fffffff; // not a number
} else {
f = (0xff << 23) | (sign << 31); // inf
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
#endif
}
//
// Methods
//
/// Default constructor
half_t() = default;
/// Reinterpret cast from CUDA's half type
CUTLASS_HOST_DEVICE
explicit half_t(half const & x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(float x) {
storage = convert(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(double x): half_t(float(x)) {
}
/// float_e4m3_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e4m3_t x): half_t(float(x)) {
}
/// float_e5m2_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e5m2_t x): half_t(float(x)) {
}
/// Integer conversion - round to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(int x) {
storage = convert(x).storage;
}
/// Integer conversion - round to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(unsigned x) {
storage = convert(x).storage;
}
/// Assignment
CUTLASS_HOST_DEVICE
half_t & operator=(half const &x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
return *this;
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return convert(*this);
}
/// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(convert(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(convert(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (convert(*this) != 0.0f);
}
/// Bitcasts to CUDA's half type
CUTLASS_HOST_DEVICE
half to_half() const {
#if defined(__CUDA_ARCH__)
return reinterpret_cast<half const &>(storage);
#else
__half_raw raw;
std::memcpy(&raw.x, &storage, sizeof(raw.x));
return half(raw);
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> 10) & 0x1f);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 15;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & 0x3ff);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::half_t const& h) {
return ((h.raw() & 0x8000) != 0);
}
CUTLASS_HOST_DEVICE
cutlass::half_t abs(cutlass::half_t const& h) {
return cutlass::half_t::bitcast(h.raw() & 0x7fff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::half_t const& h) {
return (h.exponent_biased() != 0x1f);
}
CUTLASS_HOST_DEVICE
cutlass::half_t nanh(const char*) {
// NVIDIA canonical NaN
return cutlass::half_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::half_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x1f;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::half_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x1f) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::half_t sqrt(cutlass::half_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::half_t(sqrtf(float(h)));
#else
return cutlass::half_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
half_t copysign(half_t const& a, half_t const& b) {
uint16_t a_mag = (a.raw() & 0x7fff);
uint16_t b_sign = (b.raw() & 0x8000);
uint16_t result = (a_mag | b_sign);
return half_t::bitcast(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
/// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace std
#endif
namespace platform {
/// std::numeric_limits
template <class T>
struct numeric_limits;
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
/// Returns the machine epsilon (difference between 1 and the next representable value)
CUTLASS_HOST_DEVICE
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace platform
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __heq(lhs.to_half(), rhs.to_half());
#else
return float(lhs) == float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator!=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hne(lhs.to_half(), rhs.to_half());
#else
return float(lhs) != float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hlt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) < float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hle(lhs.to_half(), rhs.to_half());
#else
return float(lhs) <= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hgt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) > float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hge(lhs.to_half(), rhs.to_half());
#else
return float(lhs) >= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
half_t operator+(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) + float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hneg(lhs.to_half()));
#else
return half_t(-float(lhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) - float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator*(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) * float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator/(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) / float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t& operator+=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) + float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator-=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) - float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator*=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) * float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator/=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) / float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator++(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
++tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator--(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
--tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t operator++(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp++;
lhs = half_t(tmp);
#endif
return ret;
}
CUTLASS_HOST_DEVICE
half_t operator--(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp--;
lhs = half_t(tmp);
#endif
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(long double x) {
return cutlass::half_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(unsigned long long int x) {
return cutlass::half_t(int(x));
}
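//
// Usage sketch (illustrative only): half_t converts explicitly from float and implicitly back to
// float, so mixed expressions promote to float unless both operands are half_t. The snippet uses
// only declarations visible in this header and is a hedged example rather than an excerpt from a
// CUTLASS test.
//
//   cutlass::half_t a(1.5f);                  // explicit construction from float
//   cutlass::half_t b = 2.25_hf;              // user-defined literal defined above
//   cutlass::half_t c = a * b + cutlass::half_t(0.5f);
//   float f = c;                              // implicit conversion back to float
//   bool finite = cutlass::isfinite(c);
//   cutlass::half_t eps = std::numeric_limits<cutlass::half_t>::epsilon();
//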
///////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/half.h/0 | {
"file_path": "include/cutlass/half.h",
"repo_id": "include",
"token_count": 9165
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a Shape template for matrix tiles
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Describes the size of a matrix tile
template <
int Row_, ///< rows of a matrix
int Column_ ///< columns of a matrix
>
struct MatrixShape {
static int const kRow = Row_; ///< rows of a matrix
static int const kColumn = Column_; ///< columns of a matrix
static int const kCount = Row_ * Column_; ///< total number of elements in a matrix
//
// Static member functions
//
CUTLASS_HOST_DEVICE
static Coord<2> toCoord() {
return make_Coord(kRow, kColumn);
}
};
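//
// Example (illustrative): MatrixShape is a purely compile-time description of a tile.
//
//   using WarpTile = cutlass::MatrixShape<64, 32>;
//   static_assert(WarpTile::kRow == 64 && WarpTile::kColumn == 32, "tile extents");
//   static_assert(WarpTile::kCount == 64 * 32, "total number of elements");
//   // WarpTile::toCoord() yields make_Coord(64, 32) at run time.
//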
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/matrix_shape.h/0 | {
"file_path": "include/cutlass/matrix_shape.h",
"repo_id": "include",
"token_count": 721
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of CTA (concept: MatrixShape)
typename OutputOp_ , ///< output operator (concept: epilogue::thread operator)
typename ReductionOp_, ///< reduction operator (concept: ReductionOperator)
int PartitionsPerStage = 4 ///< number of partitions loaded and reduced per stage
>
class ReduceSplitK {
public:
using Shape = Shape_;
using ReductionOp = ReductionOp_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = OutputOp::kCount;
static int const kPartitionsPerStage = PartitionsPerStage;
using ElementWorkspace = typename ReductionOp::Element;
using ElementAccumulator = typename ReductionOp::ElementAccumulator;
using ElementOutput = typename OutputOp::ElementOutput;
using WorkspaceTensorRef = TensorRef<ElementWorkspace, layout::RowMajor>;
using OutputTensorRef = TensorRef<ElementOutput, layout::RowMajor>;
using StrideIndex = typename WorkspaceTensorRef::Layout::Stride::Index;
using FragmentWorkspace = AlignedArray<ElementWorkspace, kElementsPerAccess>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentOutput = AlignedArray<ElementOutput, kElementsPerAccess>;
//
// Types
//
/// Params structure
struct Params {
MatrixCoord problem_size;
int partitions;
size_t partition_stride;
WorkspaceTensorRef workspace;
OutputTensorRef destination;
OutputTensorRef source;
typename OutputOp::Params output;
typename ReductionOp::Params reduction;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
MatrixCoord problem_size_,
int partitions_,
size_t partition_stride_,
WorkspaceTensorRef workspace_,
OutputTensorRef destination_,
OutputTensorRef source_,
typename OutputOp::Params output_ = typename OutputOp::Params(),
typename ReductionOp::Params reduction_ = typename ReductionOp::Params()
):
problem_size(problem_size_),
partitions(partitions_),
partition_stride(sizeof(FragmentWorkspace) * partition_stride_ / kElementsPerAccess),
workspace(workspace_),
destination(destination_),
source(source_),
output(output_),
reduction(reduction_) {
}
};
struct SharedStorage { };
public:
/// Computes the grid size given a chosen threadblock shape
CUTLASS_HOST_DEVICE
static dim3 grid_shape(
cutlass::MatrixCoord problem_size) {
return dim3(
(problem_size.row() + Shape::kRow - 1) / Shape::kRow,
(problem_size.column() + Shape::kColumn - 1) / Shape::kColumn);
}
/// Determines the threadblock shape
CUTLASS_HOST_DEVICE
static dim3 block_shape() {
return dim3(Shape::kColumn / kElementsPerAccess, Shape::kRow);
}
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &storage) {
// Determine CTA position
MatrixCoord thread_offset(
MatrixCoord::Index(int(blockIdx.x) * Shape::kRow + threadIdx.y),
MatrixCoord::Index(int(blockIdx.y) * Shape::kColumn + threadIdx.x * kElementsPerAccess)
);
// One guard conditional
if (!(thread_offset.row() < params.problem_size.row() &&
thread_offset.column() < params.problem_size.column())) {
return;
}
ReductionOp reduction_op(params.reduction);
FragmentAccumulator accumulator;
accumulator.clear();
//
// Load the first slice
//
char const *workspace_ptr =
reinterpret_cast<char const *>(
params.workspace.data() + params.workspace.offset(thread_offset));
FragmentWorkspace workspace_frag[kPartitionsPerStage];
//
// Construct the output operator
//
OutputOp output_op(params.output);
//
// Load and accumulate with a simple batched loading sequence.
//
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.partitions; k += kPartitionsPerStage) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
workspace_frag[i] = *reinterpret_cast<FragmentWorkspace const *>(workspace_ptr);
workspace_ptr += params.partition_stride;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
accumulator = reduction_op(accumulator, workspace_frag[i]);
}
}
}
//
// Conditionally load the source
//
FragmentOutput source_frag;
source_frag.clear();
FragmentOutput const *source_ptr = reinterpret_cast<FragmentOutput const *>(
params.source.data() + params.source.offset(thread_offset));
if (output_op.is_source_needed()) {
reinterpret_cast<FragmentOutput &>(source_frag) = *source_ptr;
}
//
// Compute the output
//
typename OutputOp::FragmentOutput output_frag = output_op(accumulator, source_frag);
//
// Store
//
FragmentOutput *dest_ptr = reinterpret_cast<FragmentOutput *>(
params.destination.data() + params.destination.offset(thread_offset));
*dest_ptr = reinterpret_cast<FragmentOutput const &>(output_frag);
}
};
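// Illustrative host-side usage sketch (not part of the original header). The names
// `ReductionKernel`, `Kernel`, `problem_size`, and `params` below are hypothetical
// placeholders for whatever the surrounding application defines.
//
//   using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<Shape, OutputOp, ReductionOp>;
//
//   dim3 grid  = ReductionKernel::grid_shape(problem_size);  // CTAs tile the output matrix
//   dim3 block = ReductionKernel::block_shape();             // threads cover one threadblock tile
//
//   Kernel<ReductionKernel><<< grid, block >>>(params);      // hypothetical launch wrapper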
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
| include/cutlass/reduction/kernel/reduce_split_k.h/0 | {
"file_path": "include/cutlass/reduction/kernel/reduce_split_k.h",
"repo_id": "include",
"token_count": 2623
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cstdint>
#include <string>
#define CUTLASS_MAJOR 3
#define CUTLASS_MINOR 5
#define CUTLASS_PATCH 0
#ifdef CUTLASS_VERSIONS_GENERATED
#include "cutlass/version_extended.h"
#else
#define CUTLASS_BUILD 0
#define CUTLASS_REVISION ""
#endif
#define CUTLASS_VERSION ((CUTLASS_MAJOR)*100 + (CUTLASS_MINOR)*10 + CUTLASS_PATCH)
namespace cutlass {
inline constexpr uint32_t getVersion() {
return CUTLASS_VERSION;
}
inline constexpr uint32_t getVersionMajor() {
return CUTLASS_MAJOR;
}
inline constexpr uint32_t getVersionMinor() {
return CUTLASS_MINOR;
}
inline constexpr uint32_t getVersionPatch() {
return CUTLASS_PATCH;
}
inline constexpr uint32_t getVersionBuild() {
return CUTLASS_BUILD + 0;
}
inline std::string getVersionString() {
std::string version = "@CUTLASS_VERSION@";
if (getVersionBuild()) {
version += "." + std::to_string(getVersionBuild());
}
return version;
}
inline std::string getGitRevision() {
return "@CUTLASS_REVISION@";
}
} // namespace cutlass
| include/cutlass/version.h/0 | {
"file_path": "include/cutlass/version.h",
"repo_id": "include",
"token_count": 888
} | 45 |
[README](../../README.md#documentation) > **CUTLASS 3.0 Design and Hierarchy**
# CUTLASS 3.0 Design
CUTLASS 3.0 is a major enhancement over the abstractions of CUTLASS 2.x
and aims to make usage of all layers of the GEMM hierarchy easier and more composable
while still achieving peak performance on hardware.
## CUTLASS 3.0 design goals
CUTLASS 3.0 has the following design goals, in no particular order.
- Simplify expressing and manipulating data and thread layouts across
the GEMM hierarchy with CuTe layouts and layout algebra.
- Improve code readability and learning curve by
reducing the number of named types.
- Functional correctness by default,
actionable static asserts otherwise.
- Single, clear points of performance tuning and custom kernel extensions.
- Support for NVIDIA Hopper GPUs with great performance using
features such as Tensor Cores, tensor memory accelerator, and thread block clusters.
## A new Conceptual GEMM Hierarchy
CUTLASS 2.x decomposes the moving parts of a GEMM operation
across a hierarchy that closely mirrors the organization of GPU
architectures. This is discussed in detail within the
[CUTLASS 2.x GEMM API documentation](/media/docs/gemm_api.md).
This design, however, sometimes results in a coupling that is too tight
to extend to newer GPU features that might not fit into the same architectural
hierarchy. For instance, Hopper's warp-group wide instructions do not naturally
fit into any warp or thread layer GEMM concept in CUTLASS 2.x. Even for Volta tensor cores,
instructions that atomically exist at the quad-pair granularity are first tiled at
the warp level before use. This hints at the brittleness of the abstraction power.
CUTLASS 3.0 detaches its interface layers from the hardware,
centering them instead around the natural structure of GEMM algorithms
not tied to any particular GPU generation.
This makes CUTLASS's code more robust to GPU architecture evolution,
less prone to implementation detail leakage, and provides users
with a consistent interface to hardware acceleration regardless of
the architecture specific details.
The new conceptual GEMM hierarchy is discussed in detail in the dedicated
[CUTLASS 3.0 GEMM API documentation readme](/media/docs/gemm_api_3x.md),
along with code examples of the core concepts and types.
## Adoption of CuTe Layout and Tensors
CUTLASS 3.0 introduces a new core library, CuTe, to describe and manipulate tensors of threads and data.
CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly package the type, shape, memory space, and layout of data, while performing the complicated indexing for the user.
CUTLASS 3.0 adopts CuTe throughout the GEMM hierarchy in its templates, greatly simplifying the design
and improving code composability and readability. More documentation specific to CuTe can be found in its [dedicated documentation directory](/media/docs/cute/00_quickstart.md).
![CuTe helps reduce named iterator types down to a single vocabulary type, `Layout`](/media/images/cutlass-reduction-in-named-iterators.png)
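As a rough, minimal sketch (not taken from the CUTLASS documentation, and runnable on the host), a `cute::Layout` pairs a shape with a stride to map logical coordinates onto offsets, and a `cute::Tensor` pairs such a layout with a pointer:

```c++
#include <cute/tensor.hpp>

int main() {
  using namespace cute;

  // A static 4x8 column-major layout: shape (4,8) with stride (1,4)
  auto layout = make_layout(make_shape(Int<4>{}, Int<8>{}),
                            make_stride(Int<1>{}, Int<4>{}));

  // The layout maps a logical coordinate to a linear offset: (2,3) -> 2*1 + 3*4 = 14
  int offset = layout(2, 3);

  // A tensor pairs a pointer (here, host memory) with a layout
  float data[4 * 8];
  auto tensor = make_tensor(&data[0], layout);
  tensor(2, 3) = 1.0f;

  return (offset == 14 && data[14] == 1.0f) ? 0 : 1;
}
```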
Programming massively parallel systems with various layers of logical thread and data hierarchies is not a trivial task.
- `cute::Layout`s always maintain logical consistency of their coordinates,
allowing us to check pre- and post-conditions at compile time for all static inner loops.
- Explicit thread to data mapping allows users and kernel authors to inspect and reason about operations
from a single point in the source code.
- Layouts provide a single point of performance tuning, as most optimizations can be done by careful
selection of thread and data layouts.
- Formalized algebra makes manipulation of and reasoning about thread->data mapping explicit in source code.
- Single vocabulary type (`cute::Layout`) subsumes every iterator and layout in CUTLASS 2.x. CUTLASS 2.x uses many bespoke thread maps, iterators, and data layouts; iterators are fundamentally 1-D, whereas most layouts we encounter in the GPU hierarchy are fundamentally n-D.
## Reducing the number of named types and iterator concepts
CUTLASS 2.x design preferred introducing bespoke named types for each
architecture-specific thread and data layout. For instance, the `gemm::threadblock` namespace
contains implementations for `MmaMultistage`, `MmaPlanarComplexMultistage`, `MmaPipelined`, etc.,
despite all of them providing mainloops for GEMMs. To spell these types the same way in generic code,
CUTLASS 2.x provides aliases through its `default_x_configuration.h` files; however,
these aliases make the code much harder to read, as the user has to perform type substitution
mentally in order to understand the codebase.
CUTLASS 3.0 greatly reduces the number of named types used throughout by
- Replacing all iterator concepts for all memory domains with `cute::Tensor`s
- Dispatching mainloop and epilogue implementations on tag-dispatch policies rather than naming new types
- Dispatching kernel layer schedules on tag-dispatch policies rather than naming new types
Reducing the number of named types has many benefits:
- It *makes writing generic code easier*, as the primary type names share the same lexical
structure without aliasing through configuration providers.
- It *flattens the learning curve of CUTLASS* by greatly reducing the mental context required
as the library only exposes a handful of named types.
- It *provides a clear, singular extension point* for users to plug in their customizations
  through the dispatch policies, as the sketch after this list illustrates.
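As a schematic illustration of this tag-dispatch pattern (the policy and template names below are hypothetical and much simplified relative to the real dispatch policies in `cutlass/gemm/dispatch_policy.hpp`), a single collective template can be specialized on a policy tag instead of introducing differently named classes:

```c++
// Hypothetical dispatch-policy tags
struct MainloopTwoStage { };
struct MainloopWarpSpecialized { };

// One collective mainloop template, dispatched on the policy tag
template <class DispatchPolicy, class TileShape, class ElementA, class ElementB>
struct CollectiveMainloop;

// Partial specialization selected when the two-stage policy is requested
template <class TileShape, class ElementA, class ElementB>
struct CollectiveMainloop<MainloopTwoStage, TileShape, ElementA, ElementB> {
  /* two-stage pipelined implementation */
};

// Partial specialization selected when the warp-specialized policy is requested
template <class TileShape, class ElementA, class ElementB>
struct CollectiveMainloop<MainloopWarpSpecialized, TileShape, ElementA, ElementB> {
  /* warp-specialized implementation */
};
```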
## Correctness by default, Performance through clear, individual points of tuning
CUTLASS 2.x maintained its thread layouts as implicit indexing math implemented
as a part of 1D iterators. This meant that the thread to data layout mapping
was implicit in the imperative structure of the C++ code itself and did not have
a formal algebra we could use to manipulate these mappings. Each iterator
had to re-implement its indexing and mapping logic. This made it hard to learn
how this mapping was performed for existing iterators, and even harder to
implement custom layout functions for the core inner loops of a GEMM.
CUTLASS 3.0 replaces all iterator concepts from CUTLASS 2.x
with a single layout type for thread and data tensors.
CuTe's formalized layout algebra is then used at every layer of
the GEMM hierarchy to manipulate the mapping between the two.
CuTe layouts always maintain logical consistency, and for fully static layouts
(such as in the core unrolled inner loops), provide
compile time checks that break builds if this consistency is violated.
In this way, CuTe reifies the thread-to-data-layout mapping,
making it easier to write code that is "correct by construction".
If the code compiles, it's probably correct.
| media/docs/cutlass_3x_design.md/0 | {
"file_path": "media/docs/cutlass_3x_design.md",
"repo_id": "media",
"token_count": 1661
} | 46 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Code Organization")
[README](../../README.md#documentation) > **CUTLASS Utilities**
Note: This document discusses utilities commonly used with code that targets CUTLASS 2.x.
Although CUTLASS 3.0's primary entry point APIs do not transact in these `cutlass::*` tensor types anymore,
users can still find them convenient for managing allocations with trivial affine layouts.
For more advanced host side tensor management, [`cute::Tensor`](/media/docs/cute/03_tensor.md)s
can be used on either host or device for any memory space and full expressive power of
[`cute::Layout`](/media/docs/cute/01_layout.md)s.
# CUTLASS Utilities
CUTLASS utilities are additional template classes that facilitate recurring tasks. These are
flexible implementations of needed functionality, but they are not expected to be efficient.
Applications should configure their builds to list `/tools/util/include` in their include
paths.
Source code is in [`/tools/util/include/cutlass/util/`](/tools/util/include/cutlass/util).
## Tensor Allocation and I/O
To allocate a tensor with storage in both host and device memory, use `HostTensor` in
[`cutlass/util/host_tensor.h`](/tools/util/include/cutlass/util/host_tensor.h)
```c++
template <typename Element, typename Layout>
class HostTensor;
```
This class is compatible with all CUTLASS numeric data types and layouts.
**Example:** column-major matrix storage of single-precision elements.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 32;
int columns = 16;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
return 0;
}
```
Internal host-side storage may be accessed via the following methods.
```c++
float *host_ptr = tensor.host_data();
cutlass::TensorRef<float, cutlass::layout::ColumnMajor> host_ref = tensor.host_ref();
cutlass::TensorView<float, cutlass::layout::ColumnMajor> host_view = tensor.host_view();
```
Device memory may be accessed similarly.
```c++
float *device_ptr = tensor.device_data();
cutlass::TensorRef<float, cutlass::layout::ColumnMajor> device_ref = tensor.device_ref();
cutlass::TensorView<float, cutlass::layout::ColumnMajor> device_view = tensor.device_view();
```
Printing to human-readable CSV output is accomplished with `std::ostream::operator<<()` defined in
[`cutlass/util/tensor_view_io.h`](/tools/util/include/cutlass/util/tensor_view_io.h).
Note, this assumes all views refer to host memory.
```c++
#include <cutlass/util/tensor_view_io.h>
int main() {
// Obtain a TensorView into host memory
cutlass::TensorView<float, cutlass::layout::ColumnMajor> view = tensor.host_view();
// Print to std::cout
std::cout << view << std::endl;
return 0;
}
```
Host and device memory must be explicitly synchronized by the application.
```c++
float idx = 0;
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < columns; ++j) {
// Write the element at location {i, j} in host memory
tensor.host_ref().at({i, j}) = idx;
idx += 0.5f;
}
}
// Copy host memory to device memory
tensor.sync_device();
// Obtain a device pointer usable in CUDA kernels
float *device_ptr = tensor.device_data();
```
`HostTensor<>` is usable by all CUTLASS layouts including interleaved layouts.
```c++
int rows = 4;
int columns = 3;
cutlass::HostTensor<float, cutlass::layout::ColumnMajorInterleaved<4>> tensor({rows, columns});
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < columns; ++j) {
// Write the element at location {i, j} in host memory
tensor.host_ref().at({i, j}) = float(i) * 1.5f - float(j) * 2.25f;
}
}
std::cout << tensor.host_view() << std::endl;
```
## Device Allocations
To strictly allocate memory on the device using the smart pointer pattern to manage allocation and deallocation,
use `cutlass::DeviceAllocation<>`.
**Example:** allocating an array in device memory.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor_view.h>
#include <cutlass/util/device_memory.h>
__global__ void kernel(float *device_ptr) {
}
int main() {
  size_t N = 1024;
  cutlass::DeviceAllocation<float> device_alloc(N);
  // Define a launch configuration (the values here are illustrative)
  dim3 grid(1, 1, 1);
  dim3 block(128, 1, 1);
  // Call a CUDA kernel passing device memory as a pointer argument
  kernel<<< grid, block >>>(device_alloc.get());
if (cudaGetLastError() != cudaSuccess) {
return -1;
}
// Device memory is automatically freed when device_alloc goes out of scope
return 0;
}
```
## Tensor Initialization
CUTLASS defines several utility functions to initialize tensors to uniform, procedural,
or randomly generated elements. These have implementations using strictly host code and
implementations using strictly CUDA device code.
`TensorFill()` for uniform elements throughout a tensor.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
float x = 3.14159f;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFill(tensor.host_view(), x);
// Initialize in device memory
cutlass::reference::device::TensorFill(tensor.device_view(), x);
return 0;
}
```
`TensorFillRandomUniform()` for initializing elements to a random uniform distribution.
The device-side implementation uses CURAND to generate random numbers.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double maximum = 4;
double minimum = -4;
uint64_t seed = 0x2019;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomUniform(
tensor.host_view(),
seed,
maximum,
minimum);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomUniform(
tensor.device_view(),
seed,
maximum,
minimum);
return 0;
}
```
`TensorFillRandomGaussian()` for initializing elements to a random gaussian distribution.
The device-side implementation uses CURAND to generate random numbers.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double mean = 0.5;
double stddev = 2.0;
uint64_t seed = 0x2019;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomGaussian(
tensor.host_view(),
seed,
mean,
stddev);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomGaussian(
tensor.device_view(),
seed,
mean,
stddev);
return 0;
}
```
Each of these functions accepts an additional argument to specify how many bits of
the mantissa less than 1 are non-zero. This simplifies functional comparisons when
exact random distributions are not necessary, since elements may be restricted to
integers or values with exact fixed-point representations.
```c++
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double mean = 0.5;
double stddev = 2.0;
uint64_t seed = 0x2019;
int bits_right_of_binary_decimal = 2;
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomGaussian(
tensor.host_view(),
seed,
mean,
stddev,
bits_right_of_binary_decimal);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomGaussian(
tensor.device_view(),
seed,
mean,
stddev,
bits_right_of_binary_decimal);
return 0;
}
```
These utilities may be used for all data types.
**Example:** random half-precision tensor with Gaussian distribution.
```c++
#include <cutlass/numeric_types.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/reference/device/tensor_fill.h>
#include <cutlass/util/host_tensor.h>
int main() {
int rows = 128;
int columns = 64;
double mean = 0.5;
double stddev = 2.0;
uint64_t seed = 0x2019;
// Allocate a column-major tensor with half-precision elements
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> tensor({rows, columns});
// Initialize in host memory
cutlass::reference::host::TensorFillRandomGaussian(
tensor.host_view(),
seed,
mean,
stddev);
// Initialize in device memory
cutlass::reference::device::TensorFillRandomGaussian(
tensor.device_view(),
seed,
mean,
stddev);
return 0;
}
```
## Reference Implementations
CUTLASS defines reference implementations usable with all data types and layouts. These are
used throughout the unit tests.
**Example:** Reference GEMM implementation with mixed precision internal computation.
```c++
#include <cutlass/numeric_types.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/util/host_tensor.h>
#include <cutlass/util/reference/host/gemm.h>
int main() {
int M = 64;
int N = 32;
int K = 16;
float alpha = 1.5f;
float beta = -1.25f;
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A({M, K});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B({K, N});
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C({M, N});
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> D({M, N});
cutlass::reference::host::Gemm<
cutlass::half_t, cutlass::layout::ColumnMajor, // ElementA and LayoutA
cutlass::half_t, cutlass::layout::ColumnMajor, // ElementB and LayoutB
cutlass::half_t, cutlass::layout::ColumnMajor, // ElementC and LayoutC
float, // scalar type (alpha and beta)
float> gemm_op; // internal accumulation type
gemm_op(
{M, N, K}, // problem size
alpha, // alpha scalar
A.host_view(), // TensorView to host memory
B.host_view(), // TensorView to host memory
beta, // beta scalar
C.host_view(), // TensorView to host memory
    D.host_view()); // TensorView to host memory
return 0;
}
```
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/utilities.md/0 | {
"file_path": "media/docs/utilities.md",
"repo_id": "media",
"token_count": 4175
} | 47 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import subprocess
from cutlass_library import DataTypeTag
import pydot
from cutlass.backend.evt.ir.dag_ir import DAGIR
_COLOR_MAP = {
"load": '"AliceBlue"',
"compute": "LemonChiffon1",
"accumulator": "LightGrey",
"store": "PowderBlue",
"layout": "lightseagreen",
"dag": "darkorange"
}
class EVTGraphDrawer:
"""
Visualize a EVT DAGIR with graphviz
"""
def __init__(
self,
graph: DAGIR,
name: str
):
self._name = name
self._dot_graphs = {}
self._dot_graphs[name] = self._to_dot(graph, name)
def _get_node_style(self, node):
template = {
"shape": "record",
"fillcolor": "#CAFFE3",
"style": '"filled,rounded"',
"fontcolor": "#000000",
}
if node.op in _COLOR_MAP:
template["fillcolor"] = _COLOR_MAP[node.op]
else:
raise NotImplementedError("unknown node op")
if node.disabled:
template["fontcolor"] = "grey"
template["fillcolor"] = "white"
return template
def _get_node_label(self, node):
label = "{" + f"name={node.name}|op={node.op}"
if node.op == "layout":
label += f"|fn={node.fn.__name__}"
for key in node.kwargs:
label += f"|{key}={node.kwargs[key]}"
if node.underlying_impl is not None:
label += f"|impl={type(node.underlying_impl).__name__}"
if node.op == "load":
label += f"|element_output={DataTypeTag[node.underlying_impl.element]}"
elif node.op == "compute":
label += f"|element_compute={DataTypeTag[node.underlying_impl.element_compute]}|element_output={DataTypeTag[node.underlying_impl.element_output]}"
elif node.op == "store":
label += f"|element_store={DataTypeTag[node.underlying_impl.element]}|element_output={DataTypeTag[node.underlying_impl.element_output]}"
elif node.op == "dag":
label += f"|element_output={DataTypeTag[node.underlying_impl.element_output]}"
if node.tensor is not None:
shape = node.tensor.shape
stride = node.tensor.stride
label += f"|shape={shape}|stride={stride}"
if hasattr(node, "store_tensor"):
if node.store_tensor is not None:
store_shape = node.store_tensor.shape
store_stride = node.store_tensor.stride
label += f"|store_shape={store_shape}|stride_stride={store_stride}"
label += "}"
return label
def _to_dot(
self,
graph: DAGIR,
name: str
):
        dot_graph = pydot.Dot(name, rankdir="TB")
for node in graph.nodes_meta:
style = self._get_node_style(node)
label = self._get_node_label(node)
dot_node = pydot.Node(
node.name, label=label, **style
)
dot_graph.add_node(dot_node)
if node.op == "dag":
dot_subgraph = self._to_dot(node.subgraph, name=node.name)
self._dot_graphs[node.name] = dot_subgraph
# Add edges
for src, dst in graph.edges:
weight = graph.get_edge_weight(src, dst)
dot_graph.add_edge(pydot.Edge(src, dst, label=weight))
return dot_graph
    def get_dot_graph(self) -> list:
        return [(key, self.get_dot_graph_by_name(key)) for key in self._dot_graphs.keys()]
def get_dot_graph_by_name(self, name) -> pydot.Dot:
return self._dot_graphs[name]
def get_main_dot_graph(self) -> pydot.Dot:
return self._dot_graphs[self._name]
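# Illustrative usage sketch (the `dag_ir` object below is a hypothetical DAGIR produced
# by the EVT frontend; rendering requires a local graphviz installation):
#
#   drawer = EVTGraphDrawer(dag_ir, "example_evt")
#   for name, dot_graph in drawer.get_dot_graph():
#       dot_graph.write_png(f"{name}.png")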
| python/cutlass/backend/evt/passes/graph_drawer.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/graph_drawer.py",
"repo_id": "python",
"token_count": 2285
} | 48 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from cuda import __version__, cuda
from cutlass.backend.utils.device import device_cc
_version_splits = [int(x) for x in __version__.split("rc")[0].split(".")]
_supports_cluster_launch = None
def supports_cluster_launch():
global _supports_cluster_launch
if _supports_cluster_launch is None:
major, minor = _version_splits[0], _version_splits[1]
_supports_cluster_launch = device_cc() >= 90 and (major > 11 or (major == 11 and minor >= 8))
return _supports_cluster_launch
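# Illustrative decision logic (names below are placeholders): a kernel compiled with a
# thread block cluster shape is launched through `run_with_clusters` (cuLaunchKernelEx)
# only when both the device (SM90+) and the CUDA Python bindings (>= 11.8) support it.
#
#   if supports_cluster_launch():
#       err = executable.run_with_clusters(launch_config, kernel_params)
#   else:
#       err = executable.run_without_clusters(launch_config, kernel_params)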
class LaunchConfiguration:
def __init__(self, grid=[1, 1, 1], block=[1, 1, 1], smem=0):
self.grid = grid
self.block = block
self.shared_memory_capacity = smem
class ExecutableOperation:
def __init__(self, operation):
self.operation = operation
self.module = None
self.kernel = None
def name(self):
return self.operation.procedural_name()
def emit(self):
return ""
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
raise NotImplementedError()
def plan(self, arguments):
raise NotImplementedError()
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream=cuda.CUstream(0)):
raise NotImplementedError()
def run_with_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
if hasattr(self.operation, "tile_description") and hasattr(self.operation.tile_description, "cluster_shape"):
attr = cuda.CUlaunchAttribute()
attr.value.clusterDim.x, attr.value.clusterDim.y, attr.value.clusterDim.z = self.operation.tile_description.cluster_shape
attr.id = cuda.CUstreamAttrID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION
attrs = [attr]
# Allow for non-portable cluster sizes
err, = cuda.cuFuncSetAttribute(
self.kernel, cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED, 1)
if err != cuda.CUresult.CUDA_SUCCESS:
return err
else:
attrs = []
config = cuda.CUlaunchConfig()
config.gridDimX, config.gridDimY, config.gridDimZ = launch_config.grid
config.blockDimX, config.blockDimY, config.blockDimZ = launch_config.block
config.sharedMemBytes = launch_config.shared_memory_capacity
config.hStream = stream
config.attrs = attrs
config.numAttrs = len(attrs)
err, = cuda.cuLaunchKernelEx(
config, f=self.kernel, kernelParams=kernel_params, extra=0)
return err
def run_without_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
err, = cuda.cuLaunchKernel(
self.kernel,
launch_config.grid[0], launch_config.grid[1], launch_config.grid[2],
launch_config.block[0], launch_config.block[1], launch_config.block[2],
launch_config.shared_memory_capacity,
stream,
kernel_params,
0)
return err
def run(self, host_workspace, device_workspace, launch_config, stream=cuda.CUstream(0)):
cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
packed = (ctypes.c_void_p * 1)()
packed[0] = ctypes.addressof(cArg)
if supports_cluster_launch():
return self.run_with_clusters(launch_config, packed, stream)
else:
return self.run_without_clusters(launch_config, packed, stream)
| python/cutlass/backend/operation.py/0 | {
"file_path": "python/cutlass/backend/operation.py",
"repo_id": "python",
"token_count": 2048
} | 49 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
from bisect import bisect_left
from cutlass_library import (
DataType,
DataTypeSize,
MathOperation,
OperationKind,
SharedMemPerCC
)
import cutlass
from cutlass import get_option_registry
from cutlass.backend.evt import EpilogueFunctorVisitor
from cutlass.backend.utils.device import device_cc
from cutlass.epilogue import get_activations, get_activation_epilogue, identity
from cutlass.library_defaults import KernelsForDataType, _generator_ccs
from cutlass.swizzle import get_swizzling_functors
from cutlass.utils import datatypes, check
class OperationBase:
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
def __init__(self, cc: int = None, kernel_cc: int = None, operation_kind = OperationKind.Gemm):
"""
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
:param operation_kind: class of operation that will be performed (e.g., GEMM, Conv)
:type operation_kind: cutlass_library.OperationKind
"""
self.operation_kind = operation_kind
self.cc = cc if cc is not None else device_cc()
self.specified_kernel_cc = kernel_cc is not None
self.current_cc = kernel_cc if kernel_cc is not None else self._find_closest_cc(self.cc)
self.tile_description = None
self._math_operation = None
self.options = get_option_registry().options_for_cc(self.current_cc, operation_kind)
if self.options is None:
raise Exception(f"Invalid or unsupported compute capability: {self.current_cc}")
# Default activation function: identity
self._activation = identity
def _find_closest_cc(self, cc: int) -> int:
"""
Returns the closest CC in _generator_ccs less than or equal to `cc`
:param cc: compute capability to query
:type cc: int
:returns: closest CC in _generator_ccs less than or equal to `cc`
:rtype: int
"""
if cc in _generator_ccs:
return cc
# Find closest CC lower than this CC
idx = bisect_left(_generator_ccs, cc)
if idx == 0:
raise Exception(f'No valid CC to fall back to for {cc}')
return _generator_ccs[idx-1]
def activations(self) -> list:
"""
Returns possible activation functions that can be used
:return: list of activation functions that can be used
:rtype: list
"""
return get_activations()
def swizzling_functors(self) -> list:
"""
Returns possible swizzling functions that can be used
:return: list of swizzling functions that can be used
:rtype: list
"""
return get_swizzling_functors()
def _reset_options(self, cc: int):
"""
Resets the kernel options based on cc
:param cc: compute capability to reset to
:type cc: int
"""
if cc != self.current_cc:
if cc not in _generator_ccs:
raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.')
self.current_cc = cc
self.options = get_option_registry().options_for_cc(self.current_cc, self.operation_kind)
def _verify_scalar(self, scalar, ref_scalar, ref_dtype, name):
"""
Verifies the following properties:
        1) Either ``scalar`` or ``ref_scalar`` must be set (i.e., not ``None``)
        2) If ``scalar`` is not ``None``, its datatype must match the current version
           set by the plan (i.e., that in ``ref_dtype``)
If either of these properties does not hold, an exception is raised. If these properties hold and
``scalar`` is not ``None``, ``scalar`` is returned. Otherwise, ``ref_scalar`` is returned.
:param scalar: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type scalar: numpy/cupy/torch scalar
:param ref_scalar: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_scalar: numpy/cupy/torch scalar
:param ref_dtype: data type for the scalar that this object was initialized to
:param name: identifier of the scalar to verify. Used in raising exceptions
:type name: str
:return: valid scalar to use
:rtype: numpy/cupy/torch scalar
"""
if scalar is None:
if ref_scalar is None:
raise Exception(f"Scalar {name} must be set.")
return ref_scalar
if hasattr(scalar, "dtype"):
dtype = datatypes.library_type(scalar.dtype)
if dtype != ref_dtype:
raise Exception(
f"Tensor {name} with type {dtype} does not match expected type {ref_dtype}."
)
return scalar
def _verify_tensor(self, tensor, ref_tensor, ref_dtype, ref_layout, name):
"""
Verifies the following properties:
If ref_dtype is not void:
1) Either ``tensor`` or ``ref_tensor`` must be set (i.e., not ``None``)
        2) If ``tensor`` is not ``None``, its datatype and layout must match the current versions
set by the plan (i.e., those in ``ref_dtype`` and ``ref_layout``)
If ref_dtype is void:
Neither ``tensor`` nor ``ref_tensor`` are set
If either of these properties does not hold, an exception is raised. If these properties hold and
``tensor`` is not ``None``, ``tensor`` is returned. Otherwise, ``ref_tensor`` is returned.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_tensor: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_tensor: numpy/cupy/torch array/tensor object
:param ref_dtype: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
:return: valid tensor object to use
:rtype: numpy/cupy/torch array/tensor object
"""
if ref_dtype == DataType.void:
if tensor is not None or ref_tensor is not None:
raise Exception("Operands with element DataType.void must not be provided a tensor")
return None
if tensor is None:
if ref_tensor is None:
raise Exception(f"Tensor {name} must be set.")
return ref_tensor
self._verify_type_and_layout(tensor, ref_dtype, ref_layout, name)
return tensor
@property
def opclass(self) -> cutlass.OpcodeClass:
"""
Returns the opcode class currently in use
:return: opcode class currently in use
:rtype: cutlass.OpcodeClass
"""
return self.op_class
@opclass.setter
def opclass(self, oc: cutlass.OpcodeClass):
if isinstance(oc, str):
oc = datatypes.getattr_enum(cutlass.OpcodeClass, oc)
if oc in self.possible_op_classes:
self.op_class = oc
else:
raise Exception(
f'Unsupported operation class {oc} for CC {self.cc} and data type combination '
f'({self._element_a}, {self._element_b}, {self._element_accumulator}) and '
f'layout combination ({self._layout_a}, {self._layout_b}).')
# Changing the op class also changes the possible operations available. Reset these.
self.possible_operations = self.options.operations(
self.op_class, self._element_a, self._element_b,
self._element_accumulator, self._layout_a, self._layout_b, self._math_operation)
# Changing the op class changes the elements per access in the epilogue. Reset this.
if self.epilogue_functor is not None:
self.epilogue_functor = self._reset_epilogue_functor_alignment(self._elements_per_access(), self.epilogue_functor)
@property
def math_operation(self) -> cutlass.MathOperation:
"""
Returns the math operation currently in use
:return: math operation currently in use
:rtype: cutlass.MathOperation
"""
return self._math_operation
@math_operation.setter
def math_operation(self, mo: cutlass.MathOperation):
if isinstance(mo, str):
mo = datatypes.getattr_enum(cutlass.MathOperation, mo)
if not self.specified_kernel_cc:
if self.current_cc == 90:
# CUTLASS 3.0 kernels do not use different math operations. If one is specified, we
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif self.current_cc == 90:
raise Exception("CUTLASS 3.0 kernels do not use different math operations. "
"To use 2.x kernels with a specific math operation, do not set the `kernel_cc`"
"parameter when constructing the plan.")
self._math_operation = mo
self._reset_operations()
def _elements_per_access(self):
if self.op_class == cutlass.OpcodeClass.Simt:
return 1
elif self._element_c != DataType.void:
return 128 // DataTypeSize[self._element_c]
else:
return 128 // max(self.possible_operations.alignments("C"))
def _create_epilogue_functor_activation(self, activation):
"""
Returns the epilogue functor with given activation function
"""
if self.epilogue_functor is None:
elements_per_access = self._elements_per_access()
else:
elements_per_access = self.epilogue_functor.epilogue_vector_length
if not self.specified_kernel_cc:
if self.current_cc == 90 and activation != identity:
# CUTLASS 3.0 kernels in Python currently only support identity activation. If one requests a non-identity activation,
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
if self._element_c != self._element_d:
raise Exception("CUTLASS 2.x kernels require element C to be the same as element D")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif (self.cc == 90 and self.current_cc != 90 and activation == identity and self._math_operation is None):
# SM80 fallback kernels are currently used. Since an identity activation is requested,
# we can switch back to using SM90 kernels.
self._reset_options(90)
self._reset_operations(reset_epilogue=False)
else:
if self.current_cc == 90 and activation != identity:
raise Exception("Epilogues with elementwise fusion are not currently supported "
"in the Python interface for 3.x kernels. To use 2.x kernels "
"with fused elementwise epilogues, do not set the `kernel_cc` "
"parameter when constructing the plan.")
return get_activation_epilogue(
activation,
self._element_d,
elements_per_access,
self._element_accumulator,
self._element_accumulator,
)
def _reset_epilogue_functor_activation(self, activation):
"""
Set the epilogue functor based on the provided activation function
"""
self.epilogue_functor = self._create_epilogue_functor_activation(activation)
def _reset_epilogue_functor_alignment(self, alignment, epilogue_functor):
"""
        Reset the alignment of the current epilogue functor based on the alignment of operand C
"""
if isinstance(epilogue_functor, EpilogueFunctorVisitor):
return epilogue_functor
if epilogue_functor is None or not hasattr(epilogue_functor, 'activation_functor'):
# Identity epilogue does not have 'activation_functor'
activation = identity
else:
activation = epilogue_functor.activation_functor
epilogue_functor = get_activation_epilogue(
activation,
self._element_d,
alignment,
self._element_accumulator,
self._element_accumulator,
)
return epilogue_functor
@property
def activation(self):
"""
Returns the type of the current activation function used
"""
if hasattr(self.epilogue_functor, "activation_functor"):
return self.epilogue_functor.activation_functor
else:
return identity
@activation.setter
def activation(self, act):
"""
Sets the type of the activation function to use
Activation can come with a set of arguments
:param act: type of activation function to use
:type act: str or tuple. e.g. "relu", ("leaky_relu", 0.01)
"""
if isinstance(act, tuple):
if isinstance(act[0], str):
act_fn = getattr(cutlass.backend.epilogue, act[0])
else:
act_fn = act[0]
self._reset_epilogue_functor_activation(act_fn)
self._activation_args = act[1]
self._activation = act[0]
else:
if isinstance(act, str):
act = getattr(cutlass.backend.epilogue, act)
self._reset_epilogue_functor_activation(act)
self._activation = act
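    # Illustrative usage sketch (assuming a hypothetical `plan` object built from a
    # subclass of OperationBase):
    #
    #   plan.activation = "relu"                  # activation selected by name
    #   plan.activation = ("leaky_relu", 0.01)    # activation passed with extra arguments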
@property
def epilogue_visitor(self):
"""
Return the epilogue functor
"""
return self.epilogue_functor
@epilogue_visitor.setter
def epilogue_visitor(self, visitor):
"""
Create the epilogue visitor
"""
self.epilogue_functor = EpilogueFunctorVisitor(self.cc, visitor)
# The epilogue_functor may consume too much shared memory
# Reset the possible operations
if self.cc != 90:
# The shared memory is only a concern for sm90 epilogue
# In sm80, the epilogue and mainloop share the shared memory
return
datatype_comb = self.possible_operations.datatype_comb
layout_comb = self.possible_operations.layout_comb
new_possible_operations = KernelsForDataType(datatype_comb, layout_comb)
for operation in self.possible_operations.all_operations:
td = datatypes.td_from_profiler_op(operation)
# Filter invalid epilogue schedules
if td.epilogue_schedule not in [
cutlass.EpilogueScheduleType.TmaWarpSpecialized,
cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative]:
continue
epilogue_smem_bytes = self.epilogue_functor.get_smem_size(td)
# Verify the maximum number of mainloop stages
mainloop_smem_per_stage = check.calculate_smem_usage_per_stage(td, OperationKind.Gemm)
smem_capacity_bytes = SharedMemPerCC[self.cc] << 10
mainloop_stages = (smem_capacity_bytes - epilogue_smem_bytes) // mainloop_smem_per_stage
if mainloop_stages < 2:
# Mainloop stages must >= 2
continue
new_possible_operations.add(operation)
if len(new_possible_operations.all_operations) == 0:
raise RuntimeError(
"The epilogue consumes too much shared memory. "
"No valid tile description is found in the generator.")
self.possible_operations = new_possible_operations
def run_setup(self):
"""
        Steps that must be taken before calling `plan.run()`
"""
        # Initialize the memory pool, if not already done
cutlass.get_memory_pool()
| python/cutlass/op/op.py/0 | {
"file_path": "python/cutlass/op/op.py",
"repo_id": "python",
"token_count": 7578
} | 50 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting RankK kernels
"""
import enum
import functools
import operator
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
except ImportError:
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class RankKOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.RankK
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
self.A = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_mixed_input(self):
return False
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syrk' if self.blas_mode == BlasMode.symmetric else 'herk'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRankKUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation},
${transform_a},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
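    # Worked example (assumed values): for C.element = f32 (32-bit) with C.alignment = 4,
    # this evaluates to int(min(4 * 32, 128) / 32) = 4 elements per epilogue vector;
    # wider alignments are capped at 128 bits of data per access.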
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
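# A minimal usage sketch (the 'operation' object is assumed to be a fully populated
# rank-k operation description produced elsewhere by the generator):
#
#   emitter = EmitRankKUniversalInstance()
#   instance_text = emitter.emit(operation)  # C++ 'using Operation_... = ...;' definition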
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitRankKConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRankKUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'RankKOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by rank_k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
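# A minimal usage sketch (the path, configuration name, and operation list are
# assumptions for illustration; in the generator this is driven by the manifest):
#
#   with EmitRankKConfigurationLibrary("generated/rank_k", "cutlass_rank_k_cfg") as cfg:
#     for op in operations:
#       cfg.emit(op)
#   # On exit, generated/rank_k/cutlass_rank_k_cfg.cu holds the instance definitions and an
#   # initialize_cutlass_rank_k_cfg(Manifest &) function that registers them.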
###################################################################################################
| python/cutlass_library/rank_k_operation.py/0 | {
"file_path": "python/cutlass_library/rank_k_operation.py",
"repo_id": "python",
"token_count": 5264
} | 51 |
function escapeRegExp(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
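// Example: escapeRegExp(">>> (x)") yields ">>> \(x\)" (parentheses backslash-escaped),
// so the prompt text can be embedded safely in a RegExp.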
/**
* Removes excluded text from a Node.
*
* @param {Node} target Node to filter.
* @param {string} exclude CSS selector of nodes to exclude.
 * @returns {DOMString} Text from `target` with the excluded nodes' text removed.
*/
export function filterText(target, exclude) {
  const clone = target.cloneNode(true); // clone so we don't modify the live DOM
if (exclude) {
// remove excluded nodes
clone.querySelectorAll(exclude).forEach(node => node.remove());
}
return clone.innerText;
}
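// Example (selector and markup are illustrative): for a <pre> element that contains
// a child matching ".linenos", filterText(pre, ".linenos") returns the code text with
// the line-number column removed.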
// Callback when a copy button is clicked. It receives the text content of the clicked
// node and strips any pieces of text that should not appear in the copied output.
export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") {
var regexp;
var match;
// Do we check for line continuation characters and "HERE-documents"?
var useLineCont = !!lineContinuationChar
var useHereDoc = !!hereDocDelim
// create regexp to capture prompt and remaining line
if (isRegexp) {
regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)')
} else {
regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)')
}
const outputLines = [];
var promptFound = false;
var gotLineCont = false;
var gotHereDoc = false;
const lineGotPrompt = [];
for (const line of textContent.split('\n')) {
match = line.match(regexp)
if (match || gotLineCont || gotHereDoc) {
promptFound = regexp.test(line)
lineGotPrompt.push(promptFound)
if (removePrompts && promptFound) {
outputLines.push(match[2])
} else {
outputLines.push(line)
}
      gotLineCont = line.endsWith(lineContinuationChar) && useLineCont
      if (line.includes(hereDocDelim) && useHereDoc)
        gotHereDoc = !gotHereDoc
} else if (!onlyCopyPromptLines) {
outputLines.push(line)
} else if (copyEmptyLines && line.trim() === '') {
outputLines.push(line)
}
}
// If no lines with the prompt were found then just use original lines
if (lineGotPrompt.some(v => v === true)) {
textContent = outputLines.join('\n');
}
// Remove a trailing newline to avoid auto-running when pasting
if (textContent.endsWith("\n")) {
textContent = textContent.slice(0, -1)
}
return textContent
}
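// A minimal usage sketch (the prompt string and input are assumptions for illustration):
//
//   const copied = formatCopyText(">>> print('hi')\nhi", ">>> ");
//   // copied === "print('hi')": the prompt is stripped and the bare output line is
//   // dropped because onlyCopyPromptLines defaults to true.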
| python/docs/_static/copybutton_funcs.js/0 | {
"file_path": "python/docs/_static/copybutton_funcs.js",
"repo_id": "python",
"token_count": 1049
} | 52 |