text (stringlengths 27–947k) | id (stringlengths 10–118) | metadata (dict) | __index_level_0__ (int64 0–80) |
---|---|---|---|
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Provides several functions for computing reductions over tensors (host-side reference).
*/
#pragma once
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
#include <stdexcept>
// Cute includes
#include "cute/tensor.hpp"
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/quaternion.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Tensor reductions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Transform-reduce operation over the elements of a tensor (host-side reference; no device
/// workspace is allocated)
template <
typename Tensor,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
Tensor view,
ComputeType identity,
ReduceOp reduce,
TransformOp transform
) {
for (int64_t idx = 0; idx < cute::size(view); ++idx) {
identity = reduce(identity, transform(view(idx)));
}
return identity;
}
/// Transform-reduce operation over the elements of two tensors of equal size (host-side
/// reference; no device workspace is allocated)
template <
typename TensorA,
typename TensorB,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorA view_A,
TensorB view_B,
ComputeType identity,
ReduceOp reduce,
TransformOp transform) {
if (cute::size(view_A) != cute::size(view_B)) {
throw std::runtime_error("Tensor sizes must match.");
}
for (int64_t idx = 0; idx < cute::size(view_A); ++idx) {
identity = reduce(identity, transform(view_A(idx), view_B(idx)));
}
return identity;
}
/// Helper to compute the sum of the elements of a tensor
template <
typename Tensor,
typename ComputeType = typename Tensor::value_type
>
ComputeType TensorSum(
Tensor view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
NumericConverter<ComputeType, typename Tensor::value_type> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the sum of the squares of the elements of a tensor
template <
typename Tensor,
typename ComputeType = typename Tensor::value_type
>
ComputeType TensorSumSq(
Tensor view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared<typename Tensor::value_type, ComputeType> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the norm of the elements of a tensor.
template <
typename Tensor,
typename ComputeType = double
>
ComputeType TensorNorm(
Tensor view,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSq(view, identity));
}
/// Helper to compute the sum of the squares of the differences of two tensors
template <
typename TensorA,
typename TensorB,
typename ComputeType = double
>
ComputeType TensorSumSqDiff(
TensorA view_A,
TensorB view_B,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared_difference<typename TensorA::value_type, ComputeType> transform;
return TensorTransformReduce(
view_A, view_B, identity, reduce, transform);
}
/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory
template <
typename TensorA,
typename TensorB,
typename ComputeType = double
>
ComputeType TensorNormDiff(
TensorA view_A,
TensorB view_B,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSqDiff(view_A, view_B, identity));
}
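///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only, not part of this header): given two host-side cute::Tensor
// views of equal size -- hypothetically named `view_ref` and `view_test` -- the helpers above
// compose into the relative-error check commonly used when validating kernels:
//
//   double ref_norm  = TensorNorm(view_ref);
//   double diff_norm = TensorNormDiff(view_ref, view_test);
//   double rel_error = (ref_norm > 0.0) ? (diff_norm / ref_norm) : diff_norm;
//
///////////////////////////////////////////////////////////////////////////////////////////////////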
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/tensor_reduce.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_reduce.hpp",
"repo_id": "tools",
"token_count": 1705
} | 63 |
var searchData=
[
['noexcept',['noexcept',['../platform_8h.html#a189faadd7f99f6c354db09acbb2aafcd',1,'platform.h']]],
['nullptr',['nullptr',['../platform_8h.html#ab979d9d4b4923f7c54d6caa6e1a61936',1,'platform.h']]]
];
| docs/search/defines_2.js/0 | {
"file_path": "docs/search/defines_2.js",
"repo_id": "docs",
"token_count": 109
} | 0 |
var searchData=
[
['numerictypeid',['NumericTypeID',['../namespacecutlass_1_1library.html#a366ecc865ac5b24cfdfd392199ba8e9e',1,'cutlass::library']]]
];
| docs/search/enums_6.js/0 | {
"file_path": "docs/search/enums_6.js",
"repo_id": "docs",
"token_count": 68
} | 1 |
var searchData=
[
['uniform',['Uniform',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa0fad91cf4fcbc8ab015053bea77090a6',1,'cutlass::Distribution']]]
];
| docs/search/enumvalues_5.js/0 | {
"file_path": "docs/search/enumvalues_5.js",
"repo_id": "docs",
"token_count": 82
} | 2 |
var searchData=
[
['command_5fline_2eh',['command_line.h',['../command__line_8h.html',1,'']]],
['complex_2eh',['complex.h',['../complex_8h.html',1,'']]],
['conversion_5fop_2eh',['conversion_op.h',['../conversion__op_8h.html',1,'']]],
['coord_2eh',['coord.h',['../coord_8h.html',1,'']]],
['core_5fio_2eh',['core_io.h',['../core__io_8h.html',1,'']]],
['cutlass_2eh',['cutlass.h',['../cutlass_8h.html',1,'']]]
];
| docs/search/files_2.js/0 | {
"file_path": "docs/search/files_2.js",
"repo_id": "docs",
"token_count": 195
} | 3 |
var searchData=
[
['layout_2eh',['layout.h',['../layout_8h.html',1,'']]],
['library_2eh',['library.h',['../library_8h.html',1,'']]],
['linear_5fcombination_2eh',['linear_combination.h',['../linear__combination_8h.html',1,'']]],
['linear_5fcombination_5fclamp_2eh',['linear_combination_clamp.h',['../linear__combination__clamp_8h.html',1,'']]],
['linear_5fcombination_5frelu_2eh',['linear_combination_relu.h',['../linear__combination__relu_8h.html',1,'']]],
['matrix_2eh',['matrix.h',['../layout_2matrix_8h.html',1,'']]]
];
| docs/search/files_a.js/0 | {
"file_path": "docs/search/files_a.js",
"repo_id": "docs",
"token_count": 225
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "kernel/default_b2b_conv2d_fprop.h"
#include "kernel/b2b_implicit_gemm_convolution.h"
#include "threadblock/b2b_implicit_gemm_pipelined_smem_accumulator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm
/// and 2 stage pipeline.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA,
ThreadMapA0
>
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB,
ThreadMapB0
>
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB,
ThreadMapB1
>
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
IteratorB0,
SmemIteratorB0,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
ElementC,
LayoutC,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1
>;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
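/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Instantiation sketch (illustrative only; the element types, tile shapes, epilogues, and swizzle
// below are assumptions chosen for an SM75 half-precision fused convolution, not requirements of
// this header):
//
//   using B2bFpropKernel = cutlass::conv::kernel::DefaultB2bConv2dFprop<
//     cutlass::half_t, cutlass::layout::TensorNHWC,   // A: activations
//     cutlass::half_t, cutlass::layout::TensorNHWC,   // B: filters
//     cutlass::half_t, cutlass::layout::TensorNHWC,   // C/D: output
//     float,                                          // accumulator
//     cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
//     cutlass::gemm::GemmShape<64, 64, 32>,           // threadblock shape, GEMM0
//     cutlass::gemm::GemmShape<64, 64, 32>,           // threadblock shape, GEMM1
//     cutlass::gemm::GemmShape<32, 32, 32>,           // warp shape, GEMM0
//     cutlass::gemm::GemmShape<32, 32, 32>,           // warp shape, GEMM1
//     cutlass::gemm::GemmShape<16, 8, 8>,             // tensor-op instruction shape
//     EpilogueOutputOp0, EpilogueOutputOp1,           // e.g. epilogue::thread::LinearCombinationRelu
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     2,                                              // pipeline stages
//     cutlass::arch::OpMultiplyAdd,
//     cutlass::conv::IteratorAlgorithm::kAnalytic,
//     true                                            // stage GEMM0 accumulator in shared memory
//   >::Kernel;
//
/////////////////////////////////////////////////////////////////////////////////////////////////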
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int InterleavedK
>
struct DefaultB2bConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA0
>
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB0
>
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4; //For interleaved layout
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB1
>
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIteratorCanonical<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
IteratorB0,
SmemIteratorB0,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
ElementC,
LayoutC,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm
/// and 2 stage pipeline.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA,
ThreadMapA0
>
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB,
ThreadMapB0
>
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB,
ThreadMapB1
>
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIterator<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
IteratorB0,
SmemIteratorB0,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
ElementC,
LayoutC,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1
>;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
/// Accumulator will be staged in shared memory.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
typename MathOperatorTag,
int InterleavedK
>
struct DefaultB2bConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
true
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA0
>
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
// Note GEMM shared memory threadmap is used here because conv global memory
// layout needs to be mapped to fprop which is similar to the crosswise
// layout which is used by the interleaved GEMM shared memory threadmap.
// The Interleaved GEMM global memory layout is similar to the congruous
// layout.
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB0
>
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4; //For interleaved layout
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape0::kM, WarpShape0::kN>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB1
>
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp0 = typename MmaCore0::MmaTensorOp;
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Use fragment iterator for the accumulator
using SmemAccumulatorLayout = cutlass::layout::ColumnMajorInterleaved<16>;
using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape0, InstructionShape,
ElementAccumulator,
typename WarpMmaTensorOp0::Policy::Operator::FragmentC,
SmemAccumulatorLayout
>;
// Store Accumulator tiles to Shared Memory
using SmemIteratorD0 =
cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape0,
InstructionShape,
ElementC,
SmemAccumulatorLayout
>;
static int const kThreadCount = 32;
// load warp tile from Shared Memory accumulator
using WarpIteratorA1 = cutlass::gemm::warp::MmaTensorOpMultiplicandTileIteratorCanonical<
MatrixShape<WarpShape1::kM, InstructionShape::kK>, cutlass::gemm::Operand::kA,
ElementA, SmemAccumulatorLayout,
MatrixShape<InstructionShape::kM, InstructionShape::kK>,
WarpMmaTensorOp1::Policy::OpDelta::kRow, kThreadCount>;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmPipelinedSmemAccumulator<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
IteratorB0,
SmemIteratorB0,
IteratorAccumulatorScaleBias,
FragmentIteratorAccumulator,
SmemIteratorD0,
ThreadblockShape1,
WarpIteratorA1,
IteratorB1,
SmemIteratorB1,
ElementC,
LayoutC,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_smem_accumulator_sm75.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_smem_accumulator_sm75.h",
"repo_id": "examples",
"token_count": 10064
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped Back-to-back fused GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_base_smem_accumulator.h"
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute a back-to-back fused matrix product targeting Tensor Core operations,
/// staging the first GEMM's accumulator in shared memory as the A operand of the second GEMM.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA0_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA0_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Iterates over the scale and bias vectors in global memory
// (concept: VectorIterator)
typename IteratorAccumulatorScaleBias_,
/// Iterates over accumulator tile
typename FragmentIteratorAccumulator_,
/// Iterates over accumulator tile in shared memory
typename SmemIteratorD0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile in shared memory
typename WarpIteratorA1_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...)
typename OutputOp_,
/// Policy describing tuning details (concept: MmaPipelinedPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPipelinedPolicy)
typename Policy1_,
/// Transformation applied to A0 operand
typename TransformA0_ = NumericArrayConverter<
typename SmemIteratorA0_::Element,
typename IteratorA0_::Element,
IteratorA0_::Fragment::kElements>,
///
/// Transformation applied to B0 operand
typename TransformB0_ = NumericArrayConverter<
typename SmemIteratorB0_::Element,
typename IteratorB0_::Element,
IteratorB0_::Fragment::kElements>,
///
/// Transformation applied to B1 operand
typename TransformB1_ = NumericArrayConverter<
typename SmemIteratorB1_::Element,
typename IteratorB1_::Element,
IteratorB1_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class B2bMmaPipelinedSmemAccumulator :
public B2bMmaBaseSmemAccumulator<Shape0_, Shape1_, Policy0_, Policy1_, SmemIteratorD0_, 2> {
public:
///< Base class
using Base = B2bMmaBaseSmemAccumulator<Shape0_, Shape1_, Policy0_, Policy1_, SmemIteratorD0_, 2>;
using Shape0 = Shape0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA0 = IteratorA0_; ///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA0;
using IteratorB0 = IteratorB0_; ///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB0;
using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< Iterates over tiles of the scale and bias vectors in global memory
using Policy0 = Policy0_; ///< Policy0 describing tuning details
using SmemIteratorA0 = SmemIteratorA0_;
using SmemIteratorB0 = SmemIteratorB0_;
using SmemIteratorD0 = SmemIteratorD0_; ///< Iterates over accumulator tile in shared memory
using FragmentIteratorAccumulator = FragmentIteratorAccumulator_; ///< Iterates over accumulator tile
using Shape1 = Shape1_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorB1 = IteratorB1_; ///< Iterates over tiles of B operand in global memory
using Policy1 = Policy1_; ///< Policy1 describing tuning details
using Policy = Policy1; ///< Export Policy1 as the threadblock-level Mma's policy
using Shape = Shape1;
using SmemIteratorB1 = SmemIteratorB1_;
using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate accumulator tile in shared memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using OutputOp = OutputOp_; ///< Epilogue after 1st Gemm
using TransformA0 = TransformA0_;
using TransformB0 = TransformB0_;
using TransformB1 = TransformB1_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA0 = typename IteratorA0::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB0 = typename IteratorB0::Fragment;
/// Fragment of accumulator tile
using FragmentC0 = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
/// Fragment of operand B loaded from global memory
using FragmentB1 = typename IteratorB1::Fragment;
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy0::Operator::ArchTag;
/// Complex transform on A0 operand
static ComplexTransform const kTransformA0 = Operator0::kTransformA;
/// Complex transform on B0 operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
/// Complex transform on B1 operand
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Complex transform exports needed by higher-level kernels
static ComplexTransform const kTransformA = kTransformA0;
static ComplexTransform const kTransformB = kTransformB0;
/// statically assert kStages for MmaPipelined is two (Double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
/// Epilogue in shared memory
using Epilogue0 = epilogue::threadblock::EpilogueSmemAccumulator<
SmemIteratorD0, ///< SmemTileIterator
FragmentIteratorAccumulator, ///< AccumulatorFragmentIterator
IteratorAccumulatorScaleBias, ///< ScaleBiasIterator
OutputOp>; ///< Output operator
private:
using WarpFragmentA0 = typename Operator0::FragmentA;
using WarpFragmentB0 = typename Operator0::FragmentB;
using WarpFragmentA1 = typename Operator1::FragmentA;
using WarpFragmentB1 = typename Operator1::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA0 smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B0 operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
/// Shared Memory Iterator to store accumulator tile
SmemIteratorD0 smem_iterator_D0_;
/// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile
WarpIteratorA1 warp_tile_iterator_A1_;
/// Iterator to write threadblock-scoped tile of B1 operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaPipelinedSmemAccumulator(
typename Base::B2bMmaSharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx, ///< ID of each thread within a warp
int problem_size_0_n ///< GEMM0 N is used for accumulator extent
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.b2b_mma_shared_storage.shared_storage0.operand_A_ref(), thread_idx),
smem_iterator_B0_(shared_storage.b2b_mma_shared_storage.shared_storage0.operand_B_ref(), thread_idx),
smem_iterator_D0_(shared_storage.accumulator_shared_storage0.accum_ref(), lane_idx),
warp_tile_iterator_A1_(shared_storage.accumulator_shared_storage0.accum_ref(), {Base::WarpGemm1::kM, problem_size_0_n}, lane_idx),
smem_iterator_B1_(shared_storage.b2b_mma_shared_storage.shared_storage1.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn_0 = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_k_0 = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_m_0 = warp_idx_mn_0 % Base::WarpCount0::kM;
int warp_idx_n_0 = warp_idx_mn_0 / Base::WarpCount0::kM;
int tile_offset_k_0 = Base::kWarpGemmIterations0 * warp_idx_k_0;
int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
int tile_offset_k_1 = Base::kWarpGemmIterations1 * warp_idx_k_1;
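// Worked example (illustrative): with Base::WarpCount0 = <2, 2, 1> and warp_idx = 3,
//   warp_idx_mn_0 = 3 % 4 = 3, warp_idx_k_0 = 3 / 4 = 0,
//   warp_idx_m_0 = 3 % 2 = 1, warp_idx_n_0 = 3 / 2 = 1,
// i.e. this warp covers warp tile (m, n) = (1, 1) of GEMM0 with no split along K.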
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m_0, tile_offset_k_0});
this->warp_tile_iterator_B0_.add_tile_offset({tile_offset_k_0, warp_idx_n_0});
warp_tile_iterator_A1_.add_tile_offset({warp_idx_m_1, tile_offset_k_1});
this->warp_tile_iterator_B1_.add_tile_offset({tile_offset_k_1, warp_idx_n_1});
// Add smem accumulator iterator warp offset
smem_iterator_D0_.add_tile_offset({ warp_idx_m_0 * SmemIteratorD0::TileIterations::kRow,
warp_idx_n_0 * SmemIteratorD0::TileIterations::kColumn});
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations_0, ///< number of iterations of the mainloop
FragmentC1 &accum, ///< destination accumulator tile
IteratorA0 iterator_A, ///< iterator over A operand in global memory
IteratorB0 iterator_B0, ///< iterator over B0 operand in global memory
IteratorAccumulatorScaleBias iterator_accum0_scale, ///< iterator over D0 scale vector in global memory
IteratorAccumulatorScaleBias iterator_accum0_bias, ///< iterator over D0 bias vector in global memory
IteratorB1 iterator_B1, ///< iterator over B1 operand in global memory
FragmentC0 const &src_accum, ///< source accumulator tile
OutputOp output_op_0, ///< epilogue operation after 1st Gemm
TransformA0 transform_A0 = TransformA0(), ///< transformation applied to A0 fragment
TransformB0 transform_B0 = TransformB0(), ///< transformation applied to B0 fragment
TransformB1 transform_B1 = TransformB1()) { ///< transformation applied to B1 fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
FragmentC0 accum0 = src_accum;
FragmentA0 tb_frag_A;
FragmentB0 tb_frag_B0;
tb_frag_A.clear();
tb_frag_B0.clear();
// The last kblock is loaded in the prolog
iterator_A.load(tb_frag_A);
iterator_B0.load(tb_frag_B0);
++iterator_A;
++iterator_B0;
this->smem_iterator_A_.store(transform_A0(tb_frag_A));
this->smem_iterator_B0_.store(transform_B0(tb_frag_B0));
++this->smem_iterator_A_;
++this->smem_iterator_B0_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA0 warp_frag_A0[2];
WarpFragmentB0 warp_frag_B0[2];
this->warp_tile_iterator_A0_.set_kgroup_index(0);
this->warp_tile_iterator_B0_.set_kgroup_index(0);
this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);
this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
Operator0 warp_mma0;
int smem_write_stage_idx = 1;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations_0 <= 1);
iterator_B0.clear_mask(gemm_k_iterations_0 <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
// shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A0(tb_frag_A));
this->smem_iterator_B0_.store(transform_B0(tb_frag_B0));
__syncthreads();
++this->smem_iterator_A_;
++this->smem_iterator_B0_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A0_.add_tile_offset(
{0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0});
this->warp_tile_iterator_B0_.add_tile_offset(
{-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
if (warp_mma_k == 0) {
iterator_A.load(tb_frag_A);
iterator_B0.load(tb_frag_B0);
++iterator_A;
++iterator_B0;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations_0 <= 2);
iterator_B0.clear_mask(gemm_k_iterations_0 <= 2);
}
warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2],
warp_frag_B0[warp_mma_k % 2], accum0);
}
}
/// Epilogue for the first GEMM
Epilogue0 epilogue0;
epilogue0(output_op_0, smem_iterator_D0_, accum0, iterator_accum0_scale, iterator_accum0_bias);
__syncthreads();
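// At this point the scaled/biased accumulator of the first GEMM resides in the shared memory
// accumulator tile written through smem_iterator_D0_; the second GEMM below reads it back as
// its A operand via warp_tile_iterator_A1_.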
//2nd Gemm
//
// Prologue
//
FragmentB1 tb_frag_B1;
tb_frag_B1.clear();
// The last kblock is loaded in the prolog
iterator_B1.load(tb_frag_B1);
++iterator_B1;
this->smem_iterator_B1_.store(transform_B1(tb_frag_B1));
++this->smem_iterator_B1_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA1 warp_frag_A1[2];
WarpFragmentB1 warp_frag_B1[2];
this->warp_tile_iterator_B1_.set_kgroup_index(0);
warp_tile_iterator_A1_.load(warp_frag_A1[0]);
this->warp_tile_iterator_B1_.load(warp_frag_B1[0]);
++warp_tile_iterator_A1_;
++this->warp_tile_iterator_B1_;
Operator1 warp_mma1;
smem_write_stage_idx = 1;
int gemm_k_iterations_1 = Shape0::kN / Shape1::kK;
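// The K extent of the second GEMM equals the N extent of the first, since GEMM0's output tile
// is GEMM1's A operand. For example (values illustrative only), Shape0::kN = 64 with
// Shape1::kK = 32 yields two iterations of the second mainloop.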
// Avoid reading out of bounds
iterator_B1.clear_mask(gemm_k_iterations_1 <= 1);
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_PRAGMA_UNROLL
for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations1 - 1) {
// Write fragments to shared memory
this->smem_iterator_B1_.store(transform_B1(tb_frag_B1));
__syncthreads();
++this->smem_iterator_B1_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_B1_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations1,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
// skip warp tile loading for the last kgroup
if(gemm_k_iterations_1 > 1 || warp_mma_k < Base::kWarpGemmIterations1 - 1)
warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]);
++warp_tile_iterator_A1_;
++this->warp_tile_iterator_B1_;
if (warp_mma_k == 0) {
iterator_B1.load(tb_frag_B1);
++iterator_B1;
// Avoid reading out of bounds if this was the last loop iteration
iterator_B1.clear_mask(gemm_k_iterations_1 <= 2);
}
warp_mma1(accum, warp_frag_A1[warp_mma_k % 2],
warp_frag_B1[warp_mma_k % 2], accum);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h",
"repo_id": "examples",
"token_count": 8415
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example requires NVIDIA Ampere GPU or later.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS Includes
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
// CUTLASS Utility Includes
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/gemm_complex.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define the overall warp-level problem shape
int const kM = 27;
int const kN = 31;
int const kK = 17;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define a warp-level GEMM operator.
//
// This template could be part of the CUTLASS Template Library or implemented internally. This
// wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be
// instantiated in device code.
namespace cutlass {
namespace gemm {
namespace warp {
template <
typename Shape,
typename InstructionShape,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementScalar
>
class GemmTensorOp {
public:
using WarpShape = GemmShape<
((Shape::kM + InstructionShape::kM - 1) / InstructionShape::kM) * InstructionShape::kM,
((Shape::kN + InstructionShape::kN - 1) / InstructionShape::kN) * InstructionShape::kN,
InstructionShape::kK
>;
using MmaWarp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape,
InstructionShape,
double, // Data type of A elements
cutlass::layout::RowMajor, // Layout of A matrix
double, // Data type of B elements
cutlass::layout::ColumnMajor, // Layout of B matrix
double, // Data type of C elements
cutlass::layout::RowMajor // Layout of C matrix
>::Type;
// Number of 'K groups'
int const kKgroups = (Shape::kK + InstructionShape::kK - 1) / InstructionShape::kK;
// Define a 'FragmentIterator' to iterate over slices of accumulators
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename MmaWarp::Shape,
InstructionShape,
double,
typename MmaWarp::Policy::Operator::FragmentC,
cutlass::layout::RowMajor
>;
// Define an epilogue 'Tile Iterator' to iterate over slices of elements in Shared Memory
using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpCanonical<
typename MmaWarp::Shape,
InstructionShape,
double,
cutlass::layout::RowMajor
>;
using TensorRefA = typename MmaWarp::IteratorA::TensorRef;
using TensorRefB = typename MmaWarp::IteratorB::TensorRef;
using TensorRefC = typename AccumulatorTileIterator::TensorRef;
public:
CUTLASS_HOST_DEVICE
GemmTensorOp() { }
CUTLASS_DEVICE
void operator()(
ElementScalar alpha,
TensorRefA ref_A,
TensorRefB ref_B,
ElementScalar beta,
TensorRefC ref_C,
TensorRefC ref_D,
int lane_id) const {
// Instantiate iterators pointing to slices of the A and B matrices in shared memory
typename MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id);
typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id);
// Instantiate and clear accumulator tile holding the C matrix
typename MmaWarp::FragmentC accum;
accum.clear();
// Instantiate the warp-level matrix multiply operator
MmaWarp mma_op;
// Instantiate fragments holding the slice of the matrix held by each warp
typename MmaWarp::FragmentA frag_A[2];
typename MmaWarp::FragmentB frag_B[2];
// Load fragments from shared memory
iter_A.load(frag_A[0]);
iter_B.load(frag_B[0]);
++iter_A;
++iter_B;
// Mainloop: iterate over the K groups, double-buffering the operand fragments
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < kKgroups; ++k) {
// Load fragments from shared memory
iter_A.load(frag_A[(k + 1) % 2]);
iter_B.load(frag_B[(k + 1) % 2]);
++iter_A;
++iter_B;
// Compute the matrix multiply
mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum);
}
// Instantiate iterators
FragmentIterator accum_frag_it(accum);
AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id);
AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id);
// Define function objects for linear scaling operation
cutlass::multiplies<typename FragmentIterator::Fragment> mul_source;
cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator;
// Iterate over the epilogue components
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) {
// Define storage for slices of the accumulators
typename FragmentIterator::Fragment accum_fragment;
typename FragmentIterator::Fragment source_fragment;
// Select a slice of accumulators from the accumulator tile
accum_frag_it.load(accum_fragment);
++accum_frag_it;
// Load a corresponding slice from Shared memory
source_tile_it.load(source_fragment);
++source_tile_it;
// Compute linear scaling - alpha * AB + beta * C
source_fragment = mul_source(beta, source_fragment);
accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment);
// Store the result to shared memory
dest_tile_it.store(accum_fragment);
++dest_tile_it;
}
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
// Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held
// in Shared Memory.
__global__ void kernel(
double *D_gmem,
double alpha,
double const *A_gmem,
double const *B_gmem,
double beta,
double const *C_gmem) {
// Define several matrices in shared memory
__shared__ double A[kM][kK];
__shared__ double B[kN][kK];
__shared__ double C[kM][kN];
// Copy data into SMEM
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
for (int k = 0; k < kK; ++k) {
A[m][k] = A_gmem[m * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
for (int k = 0; k < kK; ++k) {
B[n][k] = B_gmem[n * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
C[m][n] = C_gmem[m * kN + n];
}
}
}
__syncthreads();
//
// Instantiate a warp-level matrix multiply operator given the fundamental instruction shape (8x8x4),
// overall shape, data type of each operand, and layout of each operand.
//
using GemmTensorOp = cutlass::gemm::warp::GemmTensorOp<
cutlass::gemm::GemmShape<kM, kN, kK>,
cutlass::gemm::GemmShape<8, 8, 4>,
double, // Data type of A elements
cutlass::layout::RowMajor, // Layout of A matrix
double, // Data type of B elements
cutlass::layout::ColumnMajor, // Layout of B matrix
double, // Data type of C elements
cutlass::layout::RowMajor, // Layout of C matrix
double // Scalar type of alpha and beta
>;
// Instantiate the GEMM operator
GemmTensorOp gemm;
// Execute the warp-level GEMM operation
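  // Each {pointer, stride} pair below forms a TensorRef: A and B use leading dimension kK,
  // while C and D use kN. C is passed as both the source and destination, so the result
  // overwrites the C tile in shared memory.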
gemm(
alpha,
{&A[0][0], kK},
{&B[0][0], kK},
beta,
{&C[0][0], kN},
{&C[0][0], kN},
threadIdx.x);
__syncthreads();
// Copy the result from SMEM back to global memory
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
D_gmem[m * kN + n] = C[m][n];
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to canonical warp-level GEMM operation
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Return 0 so tests are considered passing if run on unsupported platforms.
return 0;
}
cutlass::HostTensor<double, cutlass::layout::RowMajor> A({kM, kK});
cutlass::HostTensor<double, cutlass::layout::ColumnMajor> B({kK, kN});
cutlass::HostTensor<double, cutlass::layout::RowMajor> C({kM, kN});
cutlass::HostTensor<double, cutlass::layout::RowMajor> D({kM, kN});
uint64_t seed = 2020;
double max = 8;
double min = -8;
cutlass::reference::host::TensorFillRandomUniform(
A.host_view(),
seed,
max,
min,
0
);
cutlass::reference::host::TensorFillRandomUniform(
B.host_view(),
seed + 17,
max,
min,
0
);
cutlass::reference::host::TensorFillRandomUniform(
C.host_view(),
seed + 31,
max,
min,
0
);
A.sync_device();
B.sync_device();
C.sync_device();
D.sync_device();
dim3 grid(1,1);
dim3 block(32, 1, 1);
double alpha = 2.25;
double beta = 1.24;
kernel<<< grid, block >>>(
D.device_data(),
alpha,
A.device_data(),
B.device_data(),
beta,
C.device_data()
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Failed to synchronize device after kernel launch." << std::endl;
return -1;
}
D.sync_host();
// Compute reference on host
cutlass::HostTensor<double, cutlass::layout::RowMajor> D_ref({kM, kN}, false);
cutlass::reference::host::GemmComplex(
{kM, kN, kK},
alpha,
A.host_ref(),
cutlass::ComplexTransform::kNone,
B.host_ref(),
cutlass::ComplexTransform::kNone,
beta,
C.host_ref(),
D_ref.host_ref(),
double()
);
// Verify reference matches computed
if (!cutlass::reference::host::TensorEquals(
D.host_view(),
D_ref.host_view())) {
std::cerr
<< "A =\n" << A.host_view()
<< "\n\nB = \n" << B.host_view()
<< "\n\nC = " << C.host_view()
<< "\n\nRef =\n" << D_ref.host_view()
<< "\n\nD =\n" << D.host_view() << "\n\n";
std::cerr << "Error - device results mismatch host reference." << std::endl;
return -1;
}
std::cout << "Passed" << std::endl;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/19_tensorop_canonical/tensorop_canonical.cu/0 | {
"file_path": "examples/19_tensorop_canonical/tensorop_canonical.cu",
"repo_id": "examples",
"token_count": 5070
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
data types in tensor cores. One big advantage is that we can load in fp32 data and convert them
implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional
fp32 data by using NVIDIA Ampere architecture.
We can use the tf32 mode of tensor core to emulate a fast accurate SGEMM kernel which is accelerated
using Ampere Tensor Cores (see include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h).
The trick is very simple
  a x b = (a_big + a_small) x (b_big + b_small)
        = a_big x b_big + a_big x b_small + a_small x b_big + a_small x b_small
  big   = convert_to_tf32(fp32)
  small = convert_to_tf32(fp32 - big)
The a_small x b_small term is discarded because it is too small to matter, leaving three TF32
multiply-accumulates per FP32 product (hence "3xTF32"). A small host-side sketch of this
decomposition appears right after the includes below.
This example demonstrates usage of this kernel, along with accuracy measurements w.r.t. actual FP32
results (SGEMM using SIMT) and against FP64 results (DGEMM)
To enable this feature, the only change needs to make is to change the default OpMultiplyAdd to
OpMultiplyAddFastF32.
We now have several different flavors of sgemm in the profiler for Ampere. Here are the differences:
sgemm // CUDA core SIMT kernel. FP32 in, accumulated in FP32, FP32 out.
s1688gemm // Use 3xTF32 to emulate FP32. FP32 in, converted in TF32-big and TF32-small internally,
// accumulated in FP32, FP32 out.
s1688tf32gemm // Use 1xTF32. FP32 in, converted to one TF32 internally, accumulated in FP32, FP32 out.
s1688gemm_tf32 // TF32 in, accumulated in FP32, FP32 out.
*/
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
int m, n, k;
double l2_norm_3xtf32_vs_fp64;
double l2_norm_1xtf32_vs_fp64;
double l2_norm_fp32_vs_fp64;
// ctor
Result(
int m, int n, int k,
double runtime_ms, double gflops,
double l2_norm_3xtf32_vs_fp64,
double l2_norm_1xtf32_vs_fp64,
double l2_norm_fp32_vs_fp64) :
m(m), n(n), k(k),
runtime_ms(runtime_ms), gflops(gflops),
l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64),
l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64),
l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {}
Result() {}
//
// Methods
//
static void print_csv_header() {
std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl;
}
void print_csv_row() {
std::cout << m << ","
<< n << ","
<< k << ","
<< runtime_ms << ","
<< gflops << ","
<< l2_norm_3xtf32_vs_fp64 << ","
<< l2_norm_1xtf32_vs_fp64 << ","
<< l2_norm_fp32_vs_fp64 << std::endl;
}
};
std::vector<Result> results;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
float alpha;
float beta;
std::string rand_mode;
int iterations;
int seed;
bool benchmark;
Options():
help(false),
problem_size({3456, 4096, 4096}),
iterations(20),
seed(1),
alpha(1),
beta(),
rand_mode("uniform"),
benchmark(false) { }
bool valid() {
//
// CUTLASS attempts to load 128b vectors of F32 elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 4 elements.
//
int const kAlignment = 4;
if ((problem_size.m() % kAlignment) ||
(problem_size.n() % kAlignment) ||
(problem_size.k() % kAlignment)) {
// misaligned tensors
return false;
}
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("seed", seed);
cmd.get_cmd_line_argument("rand_mode", rand_mode);
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "27_ampere_3xtf32_fast_accurate_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to emulate FP32 with TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --rand_mode=<string> gauss / uniform*\n\n"
<< " --seed=<int> Random number seed (1*)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm --m=1024 --n=512 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product();
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 64, 16>;  // <- threadblock tile M = 128, N = 64, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 32, 16>;  // <- warp tile M = 64, N = 32, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- default CUTLASS threadblock swizzling function
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
float, // <- data type of output matrix
    128 / cutlass::sizeof_bits<float>::value,  // <- the number of elements per vectorized
                                               // memory access. For float this is
                                               // 128 / 32 = 4 elements. This also becomes
                                               // the vector width of math instructions in
                                               // the epilogue
float, // <- data type of accumulator
float>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
// Alignment
constexpr int Alignment = 4;
//
// Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64)
//
// Gemm_3xTF32
using Gemm_3xTF32 = cutlass::gemm::device::Gemm<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
Alignment,
Alignment,
false,
cutlass::arch::OpMultiplyAddFastF32>;
// Gemm_1xTF32
using Gemm_1xTF32 = cutlass::gemm::device::Gemm<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
Alignment,
Alignment,
false,
cutlass::arch::OpMultiplyAdd>;
// Gemm_F64
using Gemm_F64 = cutlass::reference::device::Gemm<
double,
LayoutInputA,
double,
LayoutInputB,
double,
LayoutOutput,
double,
double>;
// Gemm_F32
using Gemm_F32 = cutlass::reference::device::Gemm<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
float>;
bool run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
////////////////////////////////////////////////////////////////////////////////
/// 1. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N
if (options.rand_mode == "uniform") {
const float min = -1;
const float max = 1;
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix C on host with uniform-distribution random data
} else if (options.rand_mode == "gauss") {
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomGaussian(
tensor_a_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix A on host with gaussian-distribution random data
cutlass::reference::host::TensorFillRandomGaussian(
tensor_b_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix B on host with gaussian-distribution random data
cutlass::reference::host::TensorFillRandomGaussian(
tensor_c_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix C on host with gaussian-distribution random data
}
cutlass::reference::host::TensorFill(
tensor_d_F32.host_view()); // <- fill matrix D on host with zeros
// Copy data from host to GPU
tensor_a_F32.sync_device();
tensor_b_F32.sync_device();
tensor_c_F32.sync_device();
tensor_d_F32.sync_device();
////////////////////////////////////////////////////////////////////////////////
/// 2. Initialize F64 tensors using the same values used for F32
////////////////////////////////////////////////////////////////////////////////
// Gemm input operands (A, B, C)
cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N
// Gemm output (D) for GEMM_F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Gemm output (D) for GEMM_3xTF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Gemm output (D) for GEMM_1xTF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Copy values from the F32 tensors into the F64 tensors and the output tensors
cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F64.sync_device();
tensor_b_F64.sync_device();
tensor_c_F64.sync_device();
tensor_d_F64.sync_device();
tensor_d_3xTF32.sync_device();
tensor_d_1xTF32.sync_device();
// Initialize alpha and beta for dot product computation
float alpha = float(options.alpha);
float beta = float(options.beta);
// Split the K dimension into 1 partition (i.e., no split-K)
int split_k_slices = 1;
////////////////////////////////////////////////////////////////////////////////
/// 3. Run 3xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication
tensor_a_F32.device_ref(), // <- reference to matrix A on device
tensor_b_F32.device_ref(), // <- reference to matrix B on device
tensor_c_F32.device_ref(), // <- reference to matrix C on device
tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32);
// Instantiate CUTLASS kernel depending on templates
Gemm_3xTF32 gemm_op_3xTF32;
// Check the problem size is supported or not
cutlass::Status status_3xtf32 = gemm_op_3xTF32.can_implement(arguments_3xtf32);
CUTLASS_CHECK(status_3xtf32);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_3xtf32 = gemm_op_3xTF32.initialize(arguments_3xtf32, workspace_3xtf32.get());
CUTLASS_CHECK(status_3xtf32);
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status_3xtf32 = gemm_op_3xTF32();
CUTLASS_CHECK(status_3xtf32);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return false;
}
// Compute average runtime and GFLOPs.
result.m = problem_size.m();
result.n = problem_size.n();
result.k = problem_size.k();
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
tensor_d_3xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 4. Run 1xTF32 kernel without profiling loop
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication
tensor_a_F32.device_ref(), // <- reference to matrix A on device
tensor_b_F32.device_ref(), // <- reference to matrix B on device
tensor_c_F32.device_ref(), // <- reference to matrix C on device
tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32);
// Instantiate CUTLASS kernel depending on templates
Gemm_1xTF32 gemm_op_1xtf32;
// Check the problem size is supported or not
cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32);
CUTLASS_CHECK(status_1xtf32);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get());
CUTLASS_CHECK(status_1xtf32);
// Launch initialized CUTLASS kernel
status_1xtf32 = gemm_op_1xtf32();
CUTLASS_CHECK(status_1xtf32);
tensor_d_1xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F64)
////////////////////////////////////////////////////////////////////////////////
// Create instantiation for device reference gemm kernel
Gemm_F64 gemm_f64;
// Launch device reference gemm kernel
gemm_f64(problem_size,
alpha,
tensor_a_F64.device_ref(),
tensor_b_F64.device_ref(),
beta,
tensor_c_F64.device_ref(),
tensor_d_F64.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F64.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F32)
////////////////////////////////////////////////////////////////////////////////
// Create instantiation for device reference gemm kernel
Gemm_F32 gemm_f32;
// Launch device reference gemm kernel
gemm_f32(problem_size,
alpha,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
beta,
tensor_c_F32.device_ref(),
tensor_d_F32.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/////// Compute l2 norms
////////////////////////////////////////////////////////////////////////////////
// l2 norm 3xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view());
result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm 1xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view());
result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm F32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view());
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view());
results.push_back(result);
///////////////////////////////////////////////////////////////////////////////
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << std::fixed;
std::cout.precision(4);
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout.precision(2);
std::cout << "GFLOPs: " << result.gflops << std::endl;
std::cout << "Normalized L2 norm of" << std::endl;
std::cout.precision(8);
std::cout << std::scientific
<< " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl
<< " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl
<< " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl;
return true;
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
bool result = true;
if (options.benchmark) {
for (int k = 4; k <= 65536; k *= 2) {
options.problem_size[2] = k;
printf("Gemm problem size: %d x %d x %d\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
result &= run(options);
}
} else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
result = run(options);
}
if (!result) return -1;
std::cout << std::endl << "CSV results" << std::endl;
Result::print_csv_header();
for(auto &r : results)
r.print_csv_row();
return 0;
}
| examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu/0 | {
"file_path": "examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu",
"repo_id": "examples",
"token_count": 12993
} | 8 |
# PyCUTLASS Examples
This directory contains deprecated examples for PyCUTLASS, a precursor to the CUTLASS Python interface.
For examples of using CUTLASS's actively-maintained Pythonic interface, see the [examples/python](/examples/python) directory.
| examples/40_cutlass_py/README.md/0 | {
"file_path": "examples/40_cutlass_py/README.md",
"repo_id": "examples",
"token_count": 68
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Grouped FMHA kernel
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "fmha_grouped_problem_visitor.h"
#include "gemm_kernel_utils.h"
#include "gemm/mma_accum_lambda_iterator.h"
#include "epilogue/epilogue_rescale_output.h"
namespace {
static CUTLASS_DEVICE float atomicMaxFloat(float* addr, float value) {
// source: https://stackoverflow.com/a/51549250
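  // For IEEE-754 floats, non-negative values are ordered the same way as their bit patterns
  // interpreted as signed integers, so atomicMax on the int view yields the float max.
  // Negative values are ordered in reverse when their bit patterns are viewed as unsigned
  // integers, so the float max among them corresponds to the unsigned-integer min.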
return (value >= 0)
? __int_as_float(atomicMax((int*)addr, __float_as_int(value)))
: __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename MM0_, ///! Structure for computing P = Q @ K
typename MM1_, ///! Structure for computing O = P @ V
typename scalar_t_,
typename accum_t_,
typename output_t_,
typename output_accum_t_,
  bool kKeepOutputInRF, ///! Whether the output accumulator of MM1_ should be kept in the register file across key blocks
GroupScheduleMode GroupScheduleMode_ ///! Type of scheduling to perform
>
struct FMHAGrouped {
public:
using MM0 = MM0_;
using MM1 = MM1_;
using scalar_t = scalar_t_;
using accum_t = accum_t_;
using output_t = output_t_;
using output_accum_t = output_accum_t_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static constexpr bool kNeedsOutputAccumulatorBuffer = !kKeepOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
// Parameters to satisfy BaseGrouped
using ElementA = scalar_t;
using ElementB = scalar_t;
using ElementC = accum_t;
using LayoutA = typename MM0::LayoutA;
  using LayoutB = typename MM0::LayoutB;
  using LayoutC = typename MM1::LayoutC;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static int const kAlignmentA = MM0::kAlignmentA;
static int const kAlignmentB = MM0::kAlignmentB;
static int const kAlignmentC = 1;
using Mma = typename MM1::Mma;
using EpilogueOutputOp = typename MM1::EpilogueOutputOp;
using ThreadblockSwizzle = void;
using Operator = typename MM1::Operator;
using WarpShape = typename MM1::WarpShape;
using InstructionShape = typename MM1::InstructionShape;
using ElementQ = scalar_t;
using ElementK = scalar_t;
using ElementP = accum_t;
using ElementV = scalar_t;
using ElementO = output_t;
using ElementOAccum = output_accum_t;
using ElementAccumulator = accum_t;
using LayoutQ = typename MM0::LayoutA;
using LayoutK = typename MM0::LayoutB;
using LayoutP = typename MM0::LayoutC;
using LayoutV = typename MM1::LayoutB;
using LayoutO = typename MM1::LayoutC;
static bool const kPreloadV = (MM1::Mma::ArchTag::kMinComputeCapability >= 80 &&
cutlass::sizeof_bits<ElementV>::value == 16);
static int const kAlignmentQ = MM0::kAlignmentA;
static int const kAlignmentK = MM0::kAlignmentB;
static int const kAlignmentV = 1;
using ThreadblockShape = typename MM0::ThreadblockShape;
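  // Each threadblock owns a tile of kQueriesPerBlock rows of Q and walks over the keys in
  // chunks of kKeysPerBlock columns per iteration of the key loop.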
static int const kQueriesPerBlock = ThreadblockShape::kM;
static int const kKeysPerBlock = ThreadblockShape::kN;
static constexpr bool kSupportsDropout = false;
static constexpr bool kSupportsBias = false;
/// Warp count (concept: GemmShape)
using WarpCount = typename MM1::WarpCount;
static int const kThreadsPerWarp = 32;
static int const kThreadCount = kThreadsPerWarp * WarpCount::kCount;
static constexpr int kNumWarpsPerBlock =
kQueriesPerBlock * kKeysPerBlock / (kThreadsPerWarp * kThreadsPerWarp);
using ProblemVisitor = FMHAGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord *problem_sizes0{nullptr};
GemmCoord *problem_sizes1{nullptr};
int problem_count{0};
int threadblock_count{0};
ElementQ ** ptr_Q{nullptr};
ElementK ** ptr_K{nullptr};
ElementP ** ptr_P{nullptr};
ElementV ** ptr_V{nullptr};
ElementO ** ptr_O{nullptr};
ElementOAccum ** ptr_O_accum{nullptr};
typename LayoutQ::Stride::LongIndex *ldq{nullptr};
typename LayoutK::Stride::LongIndex *ldk{nullptr};
typename LayoutP::Stride::LongIndex *ldv{nullptr};
typename LayoutO::Stride::LongIndex *ldo{nullptr};
// Whether causal masking is to be performed
bool causal{false};
// Scale
ElementAccumulator scale{0};
// Only used by device-level operator
GemmCoord *host_problem_sizes{nullptr};
//
// Methods
//
/// Default ctor
Arguments() = default;
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord *problem_sizes0,
GemmCoord *problem_sizes1,
int problem_count,
int threadblock_count,
ElementQ ** ptr_Q,
ElementK ** ptr_K,
ElementP ** ptr_P,
ElementV ** ptr_V,
ElementO ** ptr_O,
ElementOAccum ** ptr_O_accum,
typename LayoutQ::Stride::LongIndex *ldq,
typename LayoutK::Stride::LongIndex *ldk,
typename LayoutP::Stride::LongIndex *ldp,
typename LayoutV::Stride::LongIndex *ldv,
typename LayoutO::Stride::LongIndex *ldo,
bool causal,
ElementAccumulator scale,
GemmCoord *host_problem_sizes=nullptr
):
problem_sizes0(problem_sizes0),
problem_sizes1(problem_sizes1),
problem_count(problem_count),
threadblock_count(threadblock_count),
ptr_Q(ptr_Q),
ptr_K(ptr_K),
ptr_P(ptr_P),
ptr_V(ptr_V),
ptr_O(ptr_O),
ptr_O_accum(kNeedsOutputAccumulatorBuffer ? ptr_O_accum : (accum_t**)ptr_O),
ldq(ldq),
ldk(ldk),
ldv(ldv),
ldo(ldo),
causal(causal),
scale(scale),
host_problem_sizes(host_problem_sizes)
{
}
bool __host__ check_supported() {
CHECK_ALIGNED_PTR(ptr_Q, kAlignmentQ);
CHECK_ALIGNED_PTR(ptr_K, kAlignmentK);
CHECK_ALIGNED_PTR(ptr_V, kAlignmentV);
XFORMERS_CHECK(ldq % kAlignmentQ == 0, "query is not correctly aligned");
XFORMERS_CHECK(ldk % kAlignmentK == 0, "key is not correctly aligned");
XFORMERS_CHECK(ldv % kAlignmentV == 0, "value is not correctly aligned");
return true;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
ElementQ ** ptr_Q;
ElementK ** ptr_K;
ElementP ** ptr_P;
ElementV ** ptr_V;
ElementO ** ptr_O;
ElementOAccum ** ptr_O_accum;
typename LayoutQ::Stride::LongIndex *ldq;
typename LayoutK::Stride::LongIndex *ldk;
typename LayoutP::Stride::LongIndex *ldv;
typename LayoutO::Stride::LongIndex *ldo;
ElementAccumulator scale;
bool causal;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_Q(nullptr),
ptr_K(nullptr),
ptr_P(nullptr),
ptr_V(nullptr),
ptr_O(nullptr),
ptr_O_accum(nullptr),
ldq(nullptr),
ldk(nullptr),
ldv(nullptr),
ldo(nullptr),
causal(false),
scale(0)
{ }
CUTLASS_HOST_DEVICE
Params(Arguments const &args,
void *workspace = nullptr,
int tile_count = 0):
problem_visitor(args.problem_sizes0, args.problem_sizes1, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
ptr_Q(args.ptr_Q),
ptr_K(args.ptr_K),
ptr_P(args.ptr_P),
ptr_V(args.ptr_V),
ptr_O(args.ptr_O),
ptr_O_accum(kNeedsOutputAccumulatorBuffer ? args.ptr_O_accum : (accum_t**)args.ptr_O),
ldq(args.ldq),
ldk(args.ldk),
ldv(args.ldv),
ldo(args.ldo),
causal(args.causal),
scale(args.scale)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes0,
args.problem_sizes1,
args.problem_count,
workspace, tile_count);
threadblock_count = args.threadblock_count;
ptr_Q = args.ptr_Q;
ptr_K = args.ptr_K;
ptr_P = args.ptr_P;
ptr_V = args.ptr_V;
ptr_O = args.ptr_O;
ptr_O_accum = kNeedsOutputAccumulatorBuffer ? args.ptr_O_accum : (accum_t**)args.ptr_O;
ldq = args.ldq;
ldk = args.ldk;
ldv = args.ldv;
ldo = args.ldo;
causal = args.causal;
scale = args.scale;
}
};
// Shared storage - depends on kernel params
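  // Running softmax statistics kept in shared memory across key blocks (FlashAttention-style
  // online softmax; the member names below follow that convention):
  //   mi               - per-row maximum of the scores seen so far
  //   m_prime          - per-row maximum from the previous key block
  //   s_prime          - per-row running softmax denominator (sum of exponentials)
  //   out_rescale      - per-row factor used to rescale the partial output when the max grows
  //   addition_storage - scratch used to reduce partial row sums across warps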
struct ScalingCoefs {
cutlass::Array<ElementAccumulator, kQueriesPerBlock> m_prime;
cutlass::Array<ElementAccumulator, kQueriesPerBlock> s_prime;
cutlass::Array<ElementAccumulator, kQueriesPerBlock> mi;
cutlass::Array<ElementAccumulator, kQueriesPerBlock> out_rescale;
cutlass::Array<ElementAccumulator, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>
addition_storage;
};
struct SharedStorageEpilogueAtEnd : ScalingCoefs {
struct SharedStorageAfterMM0 {
// Everything here might be overwritten during MM0
typename MM0::AccumulatorSharedStorage si;
typename MM1::Mma::SharedStorage mm1;
};
union {
typename MM0::Mma::SharedStorage mm0;
SharedStorageAfterMM0 after_mm0;
typename MM1::DefaultEpilogue::SharedStorage epilogue;
};
CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage&
epilogue_shared_storage() {
return epilogue;
}
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
struct SharedStorageEpilogueInLoop : ScalingCoefs {
struct SharedStorageAfterMM0 {
// Everything here might be overwritten during MM0
typename MM0::AccumulatorSharedStorage si;
typename MM1::Mma::SharedStorage mm1;
typename MM1::DefaultEpilogue::SharedStorage epilogue;
};
union {
typename MM0::Mma::SharedStorage mm0;
SharedStorageAfterMM0 after_mm0;
};
CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage&
epilogue_shared_storage() {
return after_mm0.epilogue;
}
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
using SharedStorage = typename cutlass::platform::conditional<
kKeepOutputInRF,
SharedStorageEpilogueAtEnd,
SharedStorageEpilogueInLoop>::type;
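  // When the output stays in registers, the epilogue runs once after the key loop, so its
  // shared memory can alias the MM0/MM1 staging storage. Otherwise the epilogue runs inside
  // the loop and must coexist with the MM1 staging buffers.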
private:
// Parameters to be used by an individual tile
struct TileParams {
CUTLASS_HOST_DEVICE
static int query_start(int threadblock_idx) {
return threadblock_idx * kQueriesPerBlock;
}
// Returns whether this threadblock computes within the number of queries,
// which is determined by the M dimension of problem 0
CUTLASS_HOST_DEVICE
static bool can_compute(int threadblock_idx, const GemmCoord& problem_size0) {
return query_start(threadblock_idx) < problem_size0.m();
}
CUTLASS_HOST_DEVICE
static int num_queries(int threadblock_idx, const GemmCoord& problem_size0) {
return problem_size0.m() - query_start(threadblock_idx);
}
CUTLASS_HOST_DEVICE
static int num_keys(int threadblock_idx, const GemmCoord& problem_size0, bool causal) {
int nk = problem_size0.n();
if (causal) {
nk = cutlass::fast_min(int32_t(query_start(threadblock_idx) + kQueriesPerBlock), nk);
}
return nk;
}
};
public:
//
// Methods
//
CUTLASS_DEVICE
FMHAGrouped() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
static CUTLASS_DEVICE int16_t thread_id() {
return threadIdx.x;
}
static CUTLASS_DEVICE int8_t warp_id() {
return threadIdx.x / kThreadsPerWarp;
}
static CUTLASS_DEVICE int8_t lane_id() {
return threadIdx.x % kThreadsPerWarp;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
auto& m_prime = shared_storage.m_prime;
auto& s_prime = shared_storage.s_prime;
[[maybe_unused]] auto& si = shared_storage.after_mm0.si;
auto& mi = shared_storage.mi;
auto& out_rescale = shared_storage.out_rescale;
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size0 = problem_visitor.problem_size0();
GemmCoord problem_size1 = problem_visitor.problem_size1();
const int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
if (!TileParams::can_compute(threadblock_idx, problem_size0)) {
problem_visitor.advance(gridDim.x);
continue;
}
const int32_t problem_idx = problem_visitor.problem_index();
if (thread_id() < kQueriesPerBlock) {
s_prime[thread_id()] = ElementAccumulator(0);
out_rescale[thread_id()] = accum_t(1.0);
m_prime[thread_id()] =
-cutlass::platform::numeric_limits<ElementAccumulator>::infinity();
mi[thread_id()] = -cutlass::platform::numeric_limits<ElementAccumulator>::infinity();
}
ElementO *ptr_O = params.ptr_O[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldo[problem_idx];
ElementOAccum *ptr_O_accum = params.ptr_O_accum[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldo[problem_idx];
const int num_queries = TileParams::num_queries(threadblock_idx, problem_size0);
auto createOutputIter = [&](int col) -> typename MM1::OutputTileIterator {
using OutputTileIterator = typename MM1::OutputTileIterator;
return OutputTileIterator(
typename OutputTileIterator::Params{(int32_t)params.ldo[problem_idx]},
ptr_O,
typename OutputTileIterator::TensorCoord{
num_queries, problem_size1.n()},
thread_id(),
{0, col});
};
auto createOutputAccumIter = [&](int col) ->
typename MM1::OutputTileIteratorAccum {
using OutputTileIteratorAccum = typename MM1::OutputTileIteratorAccum;
return OutputTileIteratorAccum(
typename OutputTileIteratorAccum::Params{(int32_t)params.ldo[problem_idx]},
ptr_O_accum,
typename OutputTileIteratorAccum::TensorCoord{
num_queries, problem_size1.n()},
thread_id(),
{0, col});
};
typename MM1::Mma::FragmentC accum_o;
accum_o.clear();
const int num_keys = TileParams::num_keys(threadblock_idx, problem_size0, params.causal);
for (int32_t iter_key_start = 0; iter_key_start < num_keys;
iter_key_start += kKeysPerBlock) {
int32_t problem_size_0_m =
cutlass::fast_min((int32_t)kQueriesPerBlock, num_queries);
int32_t problem_size_0_n = cutlass::fast_min(
(int32_t)kKeysPerBlock, num_keys - iter_key_start);
int32_t const& problem_size_0_k = problem_size0.k();
int32_t const& problem_size_1_n = problem_size1.n();
int32_t const& problem_size_1_k = problem_size_0_n;
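        // Helper that begins asynchronously staging a V tile for the second GEMM (P @ V) into
        // shared memory; when kPreloadV is enabled it is issued early so the copy overlaps with
        // the softmax work performed between the two GEMMs.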
auto prologueV = [&](int blockN) {
typename MM1::Mma::IteratorB iterator_V(
typename MM1::IteratorB::Params{MM1::LayoutB(params.ldv[problem_idx])},
params.ptr_V[problem_idx] + iter_key_start * params.ldv[problem_idx],
{problem_size_1_k, problem_size_1_n},
thread_id(),
cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
MM1::Mma::prologue(
shared_storage.after_mm0.mm1,
iterator_V,
thread_id(),
problem_size_1_k);
};
__syncthreads(); // Need to have shared memory initialized, and `m_prime`
// updated from end of prev iter
//
// MATMUL: Q.K_t
//
// Computes the block-matrix product of:
// (a) query[query_start:query_end, :]
// with
// (b) key[iter_key_start:iter_key_start + kKeysPerBlock]
// and stores that into `shared_storage.si`
//
ElementQ *ptr_Q = params.ptr_Q[problem_idx] + TileParams::query_start(threadblock_idx) * params.ldq[problem_idx];
// Construct iterators to A and B operands
typename MM0::IteratorA iterator_A(
typename MM0::IteratorA::Params(
typename MM0::MmaCore::LayoutA(params.ldq[problem_idx])),
ptr_Q,
{problem_size_0_m, problem_size_0_k},
thread_id(),
{0, 0});
typename MM0::IteratorB iterator_B(
typename MM0::IteratorB::Params(
typename MM0::MmaCore::LayoutB(params.ldk[problem_idx])),
params.ptr_K[problem_idx] + iter_key_start * params.ldk[problem_idx],
{problem_size_0_k, problem_size_0_n},
thread_id(),
{0, 0});
// Construct thread-scoped matrix multiply
typename MM0::Mma mma(
shared_storage.mm0, thread_id(), warp_id(), lane_id());
typename MM0::Mma::FragmentC accum;
accum.clear();
auto gemm_k_iterations =
(problem_size_0_k + MM0::Mma::Shape::kK - 1) / MM0::Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
__syncthreads();
if (kPreloadV) {
prologueV(0);
} else {
MM1::Mma::drain_cp_asyncs();
}
typename MM0::Mma::Operator::IteratorC::TensorCoord
iteratorC_tile_offset = {
(warp_id() % MM0::Mma::WarpCount::kM),
(warp_id() / MM0::Mma::WarpCount::kM)
};
      // Apply the causal mask to the final key block: entries whose key index exceeds the
      // query index are set to -inf so they vanish after the softmax
if (params.causal && num_keys - iter_key_start <= kKeysPerBlock) {
auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset(
lane_id(), warp_id(), iteratorC_tile_offset);
int32_t last_col;
MM0::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {
last_col = TileParams::query_start(threadblock_idx) + accum_m - iter_key_start;
},
[&](int accum_m, int accum_n, int idx) {
if (accum_n > last_col) {
accum[idx] =
-cutlass::platform::numeric_limits<accum_t>::infinity();
}
},
[&](int accum_m) {});
}
// DISPATCH_BOOL(iter_key_start == 0, kIsFirst, ([&] {
// DISPATCH_BOOL(
// num_keys - iter_key_start >= kKeysPerBlock,
// kFullColumns,
// ([&] {
// // Update `mi` from accum stored in registers
// // Also does accum[i] <- exp(accum[i] - mi)
// iterative_softmax<
// typename MM0::Mma::Operator::IteratorC,
// kFullColumns,
// kIsFirst>(
// accum_o,
// accum,
// mi,
// m_prime,
// s_prime,
// lane_id(),
// thread_id(),
// warp_id(),
// num_keys - iter_key_start,
// iteratorC_tile_offset,
// kSupportsBias ? 1.0f : params.scale);
// }));
// }));
// Update `mi` from accum stored in registers
// Also does accum[i] <- exp(accum[i] - mi)
iterative_softmax<typename MM0::Mma::Operator::IteratorC>(
accum_o,
accum,
mi,
m_prime,
s_prime,
out_rescale,
shared_storage.addition_storage,
lane_id(),
thread_id(),
warp_id(),
num_keys - iter_key_start,
iter_key_start == 0,
iteratorC_tile_offset,
kSupportsBias ? 1.0f : params.scale);
// Output results to shared-memory
int warp_idx_mn_0 = warp_id() %
(MM0::Mma::Base::WarpCount::kM * MM0::Mma::Base::WarpCount::kN);
auto output_tile_coords = cutlass::MatrixCoord{
warp_idx_mn_0 % MM0::Mma::Base::WarpCount::kM,
warp_idx_mn_0 / MM0::Mma::Base::WarpCount::kM};
MM0::B2bGemm::accumToSmem(
shared_storage.after_mm0.si, accum, lane_id(), output_tile_coords);
__syncthreads();
//
// MATMUL: Attn . V
// Run the matmul `attn @ V` for a block of attn and V.
// `attn` is read from shared memory (in `shared_storage_si`)
// `V` is read from global memory (with iterator_B)
//
const int64_t nBlockN = kKeepOutputInRF ? 1
: ceil_div(
(int64_t)problem_size_1_n,
int64_t(MM1::ThreadblockShape::kN));
// Iterate over the N dimension of GEMM1
for (int blockN = 0; blockN < nBlockN; ++blockN) {
int gemm_k_iterations =
(problem_size_1_k + MM1::Mma::Shape::kK - 1) / MM1::Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add and store it in accum
// (in registers)
if (!kPreloadV) {
__syncthreads(); // we share shmem between mma and epilogue
}
typename MM1::Mma::IteratorB iterator_V(
typename MM1::IteratorB::Params{MM1::LayoutB(params.ldv[problem_idx])},
params.ptr_V[problem_idx] + iter_key_start * params.ldv[problem_idx],
{problem_size_1_k, problem_size_1_n},
thread_id(),
cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
typename MM1::Mma mma_pv(
// operand A: Pij_dropped in shared memory
shared_storage.after_mm0.si.accum_ref(),
// operand B: shared memory staging area for Vj, which is loaded
// from global memory
shared_storage.after_mm0.mm1.operand_B_ref(),
(int)thread_id(),
(int)warp_id(),
(int)lane_id());
mma_pv.set_prologue_done(kPreloadV);
if (!kKeepOutputInRF) {
accum_o.clear();
}
mma_pv(gemm_k_iterations, accum_o, iterator_V, accum_o);
__syncthreads();
if (kPreloadV && !kKeepOutputInRF && blockN + 1 < nBlockN) {
prologueV(blockN + 1);
}
if (!kKeepOutputInRF) {
MM1::Mma::drain_cp_asyncs();
DISPATCH_BOOL(
iter_key_start == 0, kIsFirst, ([&] {
DISPATCH_BOOL(
(iter_key_start + kKeysPerBlock) >= num_keys,
kIsLast,
([&] {
using DefaultEpilogue = typename MM1::DefaultEpilogue;
using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
using ElementCompute = typename DefaultOp::ElementCompute;
using EpilogueOutputOp = typename cutlass::epilogue::
thread::MemoryEfficientAttentionNormalize<
typename cutlass::platform::conditional<
kIsLast,
output_t,
output_accum_t>::type,
output_accum_t,
DefaultOp::kCount,
typename DefaultOp::ElementAccumulator,
output_accum_t,
kIsFirst,
kIsLast,
cutlass::Array<ElementCompute, kQueriesPerBlock>>;
using Epilogue = typename cutlass::epilogue::threadblock::
EpiloguePipelined<
typename DefaultEpilogue::Shape,
typename MM1::Mma::Operator,
DefaultEpilogue::kPartitionsK,
typename cutlass::platform::conditional<
kIsLast,
typename MM1::OutputTileIterator,
typename MM1::OutputTileIteratorAccum>::type,
typename DefaultEpilogue::
AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::SharedLoadIterator,
EpilogueOutputOp,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration,
true, // IterationsUnroll
typename MM1::OutputTileIteratorAccum // Read
// iterator
>;
int col = blockN * MM1::Mma::Shape::kN;
auto source_iter = createOutputAccumIter(col);
auto dest_iter = gemm_kernel_utils::call_conditional<
kIsLast,
decltype(createOutputIter),
decltype(createOutputAccumIter)>::
apply(createOutputIter, createOutputAccumIter, col);
EpilogueOutputOp rescale(s_prime, out_rescale);
Epilogue epilogue(
shared_storage.epilogue_shared_storage(),
thread_id(),
warp_id(),
lane_id());
epilogue(rescale, dest_iter, accum_o, source_iter);
}));
}));
if (!kKeepOutputInRF) {
__syncthreads();
}
}
}
__syncthreads(); // we modify `m_prime` after
}
if (kKeepOutputInRF) {
const bool kIsFirst = true;
const bool kIsLast = true;
using DefaultEpilogue = typename MM1::DefaultEpilogue;
using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
using ElementCompute = typename DefaultOp::ElementCompute;
using EpilogueOutputOp =
typename cutlass::epilogue::thread::MemoryEfficientAttentionNormalize<
output_t, // output
output_accum_t, // source
DefaultOp::kCount,
typename DefaultOp::ElementAccumulator, // accum
output_accum_t, // compute
kIsFirst,
kIsLast,
cutlass::Array<ElementCompute, kQueriesPerBlock>>;
using Epilogue =
typename cutlass::epilogue::threadblock::EpiloguePipelined<
typename DefaultEpilogue::Shape,
typename MM1::Mma::Operator,
DefaultEpilogue::kPartitionsK,
typename MM1::OutputTileIterator, // destination
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::SharedLoadIterator,
EpilogueOutputOp,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration,
true, // IterationsUnroll
typename MM1::OutputTileIteratorAccum // source tile
>;
auto dest_iter = createOutputIter(0);
EpilogueOutputOp rescale(s_prime, out_rescale);
Epilogue epilogue(
shared_storage.epilogue_shared_storage(),
thread_id(),
warp_id(),
lane_id());
MM1::Mma::drain_cp_asyncs();
epilogue(rescale, dest_iter, accum_o);
}
// Next tile
problem_visitor.advance(gridDim.x);
__syncthreads(); // Don't start the next iteration until all threads are done using shared memory.
}
}
template <typename WarpIteratorC>
CUTLASS_DEVICE static void iterative_softmax(
typename WarpIteratorC::Fragment& frag_o, // output so far
typename WarpIteratorC::Fragment& frag,
cutlass::Array<accum_t, kQueriesPerBlock>& mi,
cutlass::Array<accum_t, kQueriesPerBlock>& m_prime,
cutlass::Array<accum_t, kQueriesPerBlock>& s_prime,
cutlass::Array<accum_t, kQueriesPerBlock>& out_rescale,
cutlass::Array<accum_t, kQueriesPerBlock * MM0::MmaCore::WarpCount::kN>&
addition_storage,
int8_t lane_id,
int8_t thread_id,
int8_t warp_id,
int max_col,
bool is_first,
typename WarpIteratorC::TensorCoord const& tile_offset,
float scaling) {
/* Iterates on the accumulator and corresponding position on result matrix
(1) Update `mi[r]` to the max value of the row `r`
(2) In a second iteration do the following:
(a) accum <- exp(accum - mi)
(b) m_prime <- exp(m_prime - mi)
(c) s_prime <- s_prime * m_prime + sum(accum)
       All of this is done in registers, before the result is stored to
       shared memory for the next matmul with Value.
*/
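    // Equivalent per-row scalar recurrence, shown here only as a reference
    // sketch (the code below distributes this work across lanes and warps):
    //   mi[r]      = max(mi[r], max_j frag[r][j])        // running row max
    //   rescale    = exp2(m_prime[r] - mi[r])            // == out_rescale[r]
    //   s_prime[r] = s_prime[r] * rescale + sum_j exp2(frag[r][j] - mi[r])
    //   frag[r][j] = exp2(frag[r][j] - mi[r])
    //   m_prime[r] = mi[r]
    // exp2 is used because `frag` is pre-multiplied by `scaling * log2(e)`.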
using Fragment = typename WarpIteratorC::Fragment;
using LambdaIterator = typename DefaultMmaAccumLambdaIterator<
WarpIteratorC,
accum_t,
kThreadsPerWarp>::Iterator;
// Convert to `accum_t` (rather than double)
constexpr float kLog2e = 1.4426950408889634074; // log_2(e) = M_LOG2E
static_assert(kQueriesPerBlock % kNumWarpsPerBlock == 0, "");
static constexpr int kLinesPerWarp = kQueriesPerBlock / kNumWarpsPerBlock;
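    // Example (hypothetical sizes): with kQueriesPerBlock = 64 and
    // kNumWarpsPerBlock = 4, kLinesPerWarp = 16, so lanes 0..15 of each warp
    // each own one row of `mi` / `m_prime` / `s_prime` in the updates below.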
frag = cutlass::multiplies<Fragment>()(scaling * kLog2e, frag);
auto lane_offset =
LambdaIterator::get_lane_offset(lane_id, warp_id, tile_offset);
// First update `mi` to the max per-row
{
accum_t max;
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {
max = -cutlass::platform::numeric_limits<accum_t>::infinity();
},
[&](int accum_m, int accum_n, int idx) {
if (accum_n < max_col) {
max = cutlass::fast_max(max, frag[idx]);
}
},
[&](int accum_m) {
          // Issuing 4x atomicMax appears to be faster than first reducing
          // within the warp
atomicMaxFloat(&mi[accum_m], max);
});
}
// Make sure we all share the update values for `mi`
__syncthreads();
// Doing this `exp` is quite expensive. Let's
// split it across the warps
bool restore_mi_to_minus_inf = false;
if (lane_id < kLinesPerWarp) {
int id = warp_id * kLinesPerWarp + lane_id;
auto m_prime_id = m_prime[id];
auto mi_id = mi[id];
bool changed = m_prime_id < mi_id; // `false` if both are -inf
if (changed) {
auto m_prime_exp = exp2f(m_prime_id - mi_id);
out_rescale[id] = m_prime_exp;
s_prime[id] *= m_prime_exp;
} else {
        // Only when bias is enabled is it possible for all of the first
        // attention values to be masked to `-inf`. In that case we want to
        // avoid `nan = exp2f(-inf - (-inf))`, so we temporarily set `mi` to 0
if (kSupportsBias &&
mi_id == -cutlass::platform::numeric_limits<accum_t>::infinity()) {
restore_mi_to_minus_inf = true;
mi[id] = 0.0f;
}
out_rescale[id] = 1.0f;
}
}
__syncthreads(); // Update output fragments
if (kKeepOutputInRF && !is_first) {
accum_t line_rescale;
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { line_rescale = out_rescale[accum_m]; },
[&](int accum_m, int accum_n, int idx) {
frag_o[idx] = frag_o[idx] * line_rescale;
},
[&](int accum_m) {});
}
// Update accum_m, accum_n, ...
{
accum_t mi_row, total_row;
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { mi_row = mi[accum_m]; },
[&](int accum_m, int accum_n, int idx) {
frag[idx] =
(accum_n < max_col) ? exp2f(frag[idx] - mi_row) : accum_t(0.0);
},
[&](int accum_m) {});
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { total_row = 0.0; },
[&](int accum_m, int accum_n, int idx) { total_row += frag[idx]; },
[&](int accum_m) {
if (LambdaIterator::reduceSameRow(
lane_id, total_row, [](accum_t a, accum_t b) {
return a + b;
})) {
// NOTE: we could atomically add `total_row` to `s_prime`, but
// it's faster (and deterministic) to avoid atomics here
addition_storage
[accum_m + kQueriesPerBlock * tile_offset.column()] =
total_row;
}
});
}
__syncthreads();
if (lane_id < kLinesPerWarp) {
int id = warp_id * kLinesPerWarp + lane_id;
accum_t total_row = s_prime[id];
if (restore_mi_to_minus_inf) {
// Restore `mi`, see above when we set `restore_mi_to_minus_inf=true`
mi[id] = -cutlass::platform::numeric_limits<accum_t>::infinity();
} else {
m_prime[id] = mi[id];
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < MM0::MmaCore::WarpCount::kN; ++i) {
total_row += addition_storage[id + kQueriesPerBlock * i];
}
s_prime[id] = total_row;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fmha_grouped.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/fmha_grouped.h",
"repo_id": "examples",
"token_count": 17648
} | 10 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
    This iterator uses masks to guard out-of-bounds accesses. The first tile
    this iterator visits may be partial, while the remaining tiles are
    complete, so the predicates only need to be computed twice: once for the
    partial tile and once for the remaining full tiles, which can all share
    the same predicates.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
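/*
  Illustrative call pattern (a hypothetical sketch only; the actual callers
  are the threadblock-scoped MMA pipelines that wrap this access iterator,
  and the names `num_tiles`, `is_partial`, and `kAccessesPerTile` are made up
  for this example):

    using Iterator = PredicatedTileAccessIteratorResidualLast<...>;
    typename Iterator::Params params(layout);
    Iterator it(params, pointer, extent, thread_id, threadblock_offset);

    for (int tile = 0; tile < num_tiles; ++tile) {
      // Restore the residual predicate mask when visiting the partial tile;
      // full tiles reuse the steady-state mask.
      it.set_residual_tile(is_partial(tile));
      for (int access = 0; access < kAccessesPerTile; ++access, ++it) {
        if (it.valid()) {
          AccessType const* ptr = it.get();
          // ... issue the load ...
        }
      }
      // advance one tile along the iteration rank (coordinate order depends
      // on the layout specialization)
      it.add_tile_offset({0, 1});
    }
*/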
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorResidualLast
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
typename AccessType,
bool Gather = false>
class PredicatedTileAccessIteratorResidualLast;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for pitch-linear
/// data.
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::PitchLinear,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape,
Element,
Layout,
AdvanceRank,
ThreadMap,
AccessType>;
static int const kAccessesPerVector =
ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(
!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = typename UnderlyingPredicates::Mask;
  /// Parameters object; derives from the non-templated
  /// PredicatedTileAccessIteratorParams base class
struct Params : PredicatedTileAccessIteratorParams {
using Base = PredicatedTileAccessIteratorParams;
// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: Base(
layout.stride(0),
MakePredicatedTileAccessIteratorDesc<
Shape,
Element,
Layout,
kAdvanceRank,
ThreadMap>()()) {}
CUTLASS_HOST_DEVICE
Params(Base const& base) : Base(base) {}
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char*;
private:
//
// Data members
//
UnderlyingPredicates the_predicates;
Mask residual_tile_mask;
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Below is used when Gather is turned on. We need to record strided_offset
  /// and contiguous_offset separately to compute the offset by using
///
/// offset = contiguous_offset + indices[strided_offset]
///
/// Gather indices
int const* indices_;
Index gather_offset_strided;
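  // Illustration (hypothetical values): with indices_ = {4, 0, 7}, an access
  // whose strided position is 2 reads row indices_[2] == 7 of the source
  // tensor, at a byte offset of 7 * stride * sizeof_bits<Element>::value / 8
  // from `pointer_` (see get() below).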
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
/// Gather indices
int const* indices = nullptr)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent),
indices_(indices) {
the_predicates.set_predicates(thread_id, threadblock_offset);
the_predicates.get_mask(residual_tile_mask);
// Working around a weird compiler bug happening on P100 for the backward.
// I've seen together: the_predicates.predicates_[0] = 14 (instead of 15)
// residual_tile_mask[0] = 15 (correct)
//
// Adding prints when the value is calculated (in `compute_predicates_`)
// sometimes removes the bug. The consequence is that we skip some
// element of a tensor, leading to wrong results
// Setting `compute_predicates_`'s second argument (`is_steady_state`) to
// true also seems to get rid of the bug - at the cost of twice as many
// comparisons.
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
constexpr bool kWorkAroundCompilerBug = false;
#else
constexpr bool kWorkAroundCompilerBug = true;
#endif
the_predicates.compute_predicates_(extent, true && !kWorkAroundCompilerBug);
// update internal pointers
Layout layout(params_.stride_);
if (!Gather) {
add_pointer_offset(layout(the_predicates.thread_offset_));
} else {
gather_offset_strided = the_predicates.thread_offset_.strided();
add_pointer_offset(
layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0)));
}
}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
the_predicates.set_iteration_index(index);
}
  /// Restores the residual-tile predicate mask captured at construction.
  /// Passing `false` is a no-op and keeps the current (steady-state) mask.
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool is_residual_tile) {
if (is_residual_tile) {
the_predicates.set_mask(residual_tile_mask);
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
if (!Gather) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
gather_offset_strided += Shape::kStrided * tile_offset.strided();
}
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
if (Gather) {
assert(indices_);
if (!valid()) {
return nullptr;
}
LongIndex contiguous_offset = the_predicates.iteration_contiguous_ *
(ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value /
8) +
the_predicates.iteration_vector_;
int strided_index = gather_offset_strided +
the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided;
LongIndex strided_offset = indices_[strided_index] *
LongIndex(params_.stride_) * sizeof_bits<Element>::value / 8;
return reinterpret_cast<AccessType*>(
pointer_ + contiguous_offset + strided_offset);
}
return reinterpret_cast<AccessType*>(
pointer_ +
the_predicates.iteration_contiguous_ *
(ThreadMap::Delta::kContiguous *
sizeof_bits<Element>::value) /
8) +
the_predicates.iteration_vector_;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ <
ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
if (!Gather) {
pointer_ += params_.inc_strided_;
}
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
if (!Gather) {
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced,
// this subtraction as well as the subsequent integer addition are both
// elided by the compiler.
pointer_ -= params_.inc_advance_;
}
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
the_predicates.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
the_predicates.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
the_predicates.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
the_predicates.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() const {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::ColumnMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType,
Gather>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row(),
threadblock_offset.column()),
indices) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
  /// Toggles the residual-tile predicate mask of the underlying iterator
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::RowMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType,
Gather>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
/// Gather indices
int const* indices = nullptr)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column(),
threadblock_offset.row()),
indices) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
  /// Toggles the residual-tile predicate mask of the underlying iterator
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::AffineRankN<2>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRankN<2>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape,
Element,
layout::PitchLinear,
AdvanceRank,
ThreadMap,
AccessType>;
static int const kAccessesPerVector =
ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(
!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingPredicates::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorResidualLast;
private:
/// stride of pitch-linear layout (units of Element)
Coord<Layout::kStrideRank, Layout::LongIndex> stride_;
    /// amount (in bytes) to increment pointer to move to next access along
    /// contiguous dimension
    LongIndex inc_contiguous_;
    /// amount (in bytes) to increment pointer from first access of current
    /// contiguous dimension to first access of next one.
    LongIndex inc_strided_;
    /// amount (in bytes) to increment pointer from last access of current
    /// contiguous dimension to first access of next one.
    LongIndex inc_next_strided_;
    /// amount (in bytes) to increment pointer from last access to first access
    /// of next tile
    LongIndex inc_next_;
    /// amount (in bytes) to increment pointer from first access of current tile
    /// to first access of next tile
    LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
  Params()
      : stride_(0),
        inc_contiguous_(0),
        inc_strided_(0),
        inc_next_strided_(0),
        inc_next_(0),
        inc_advance_(0) {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: stride_({layout.stride(0), layout.stride(1)}) {
inc_contiguous_ =
(LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) *
sizeof_bits<Element>::value / 8;
inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
inc_next_strided_ = inc_strided_ -
LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ = Shape::kStrided * LongIndex(stride_[1]) *
sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ =
Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ -
LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ -
LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_;
};
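    // Worked example (hypothetical configuration): for a 16-bit Element,
    // stride = {1, 128}, ThreadMap::Delta = {8, 4} and
    // ThreadMap::Iterations::kContiguous = 4:
    //   inc_contiguous_   = 1 * 8 * 16 / 8   = 16 bytes
    //   inc_strided_      = 128 * 4 * 16 / 8 = 1024 bytes
    //   inc_next_strided_ = 1024 - 3 * 16    = 976 bytes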
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char*;
//
// Data members
//
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
UnderlyingPredicates the_predicates;
Mask residual_tile_mask;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent) {
the_predicates.set_predicates(thread_id, threadblock_offset);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.thread_offset_));
}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
the_predicates.set_iteration_index(index);
}
  /// Switches the predicate set to the residual-tile mask; passing `false`
  /// leaves the current mask unchanged
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool is_residual_tile) {
if (is_residual_tile) {
the_predicates.set_mask(residual_tile_mask);
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]);
pointer_ += Shape::kStrided * tile_offset[1];
}
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(pointer_) +
the_predicates.iteration_vector_;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ <
ThreadMap::Iterations::kContiguous) {
pointer_ += params_.inc_contiguous_;
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_next_strided_;
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
the_predicates.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
the_predicates.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
the_predicates.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
the_predicates.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
/// column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::AffineRank2ColumnMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){};
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
  /// Toggles the residual-tile predicate mask of the underlying iterator
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset(
make_Coord(tile_offset.row(), tile_offset.column()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank-2
/// row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::AffineRank2RowMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
  /// Toggles the residual-tile predicate mask of the underlying iterator
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset(
make_Coord(tile_offset.column(), tile_offset.row()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
/// interleaved data. It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
int InterleavedK>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<
Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(
extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
  /// Toggles the residual-tile predicate mask of the underlying iterator
  CUTLASS_HOST_DEVICE
  void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
/// interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
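/// As a concrete illustration of the mapping used by this specialization (values chosen
/// arbitrarily): with InterleavedK = 32, a row-major-interleaved extent of
/// (rows, columns) = (128, 64) is presented to the underlying pitch-linear iterator as
/// (contiguous, strided) = (columns * InterleavedK, rows / InterleavedK) = (2048, 4).
///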
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
int InterleavedK>
class PredicatedTileAccessIteratorResidualLast<
Shape_,
Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<
Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices =
nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(
extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(
params,
pointer,
extent,
thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iterator_.set_iteration_index(index);
}
CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) {
iterator_.set_residual_tile(enable);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const {
return reinterpret_cast<AccessType*>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int) {
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) {
iterator_.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/iterators/predicated_tile_access_iterator_residual_last.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/iterators/predicated_tile_access_iterator_residual_last.h",
"repo_id": "examples",
"token_count": 22473
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator without splitk
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename OutputOp_ ///< Output operator
>
class FusedBiasActEpilogue {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using OutputOp = OutputOp_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
public:
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
public:
/// Constructor
CUTLASS_DEVICE
FusedBiasActEpilogue(
){ }
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
AccumulatorTile & fused_bias_act_accumlators,
      OutputTileIterator source_iterator) { ///< Tile iterator loading the source (e.g. bias) tile
bool need_bias = output_op.is_source_needed();
if (need_bias)
compute_source_needed_(output_op, accumulators, fused_bias_act_accumlators, source_iterator);
else
compute_source_no_needed_(output_op, accumulators, fused_bias_act_accumlators);
}
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
      AccumulatorTile & fused_bias_act_accumlators) { ///< Accumulator tile receiving the fused bias/activation result
compute_source_no_needed_(output_op, accumulators, fused_bias_act_accumlators);
}
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
AccumulatorTile & fused_bias_act_accumlators,
      OutputTileIterator source_iterator) { ///< Tile iterator loading the source (e.g. bias) tile
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
AccumulatorFragmentIterator fused_bias_act_fragment_iterator(fused_bias_act_accumlators);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
source_iterator.load(source_fragment);
++source_iterator;
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
typename AccumulatorFragmentIterator::Fragment fused_bias_act_fragment;
fused_bias_act_fragment = output_op(accum_fragment, source_fragment);
fused_bias_act_fragment_iterator.store(fused_bias_act_fragment);
++fused_bias_act_fragment_iterator;
}
}
CUTLASS_DEVICE
void compute_source_no_needed_(
OutputOp const &output_op, ///< Output operator
AccumulatorTile &accumulators, ///< Complete warp-level accumulator tile
      AccumulatorTile & fused_bias_act_accumlators) { ///< Accumulator tile receiving the fused bias/activation result
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
AccumulatorFragmentIterator fused_bias_act_fragment_iterator(fused_bias_act_accumlators);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < AccumulatorFragmentIterator::kIterations; ++iter) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
typename AccumulatorFragmentIterator::Fragment fused_bias_act_fragment;
fused_bias_act_fragment = output_op(accum_fragment);
fused_bias_act_fragment_iterator.store(fused_bias_act_fragment);
++fused_bias_act_fragment_iterator;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h",
"repo_id": "examples",
"token_count": 2743
} | 12 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
class replace_fix_impl:
def __init__(self, src_dir, dst_dir, cutlass_deps_root):
self.src_dir = src_dir
self.dst_dir = dst_dir
self.cutlass_deps_root = cutlass_deps_root
    def gen_code(self):
        # Walk the source tree and mirror its directory structure under dst_dir.
        for sub_dir in os.walk(self.src_dir):
            files_in_sub_dir = sub_dir[2]
            src_dirs = sub_dir[0]
            output_dirs = self.dst_dir + sub_dir[0][len(self.src_dir):]
            if not os.path.exists(output_dirs):
                os.mkdir(output_dirs)
            for f in files_in_sub_dir:
                with open(src_dirs + "/" + f, 'r') as current_file:
                    output_lines = []
                    lines = current_file.readlines()
                    for line in lines:
                        # Rewrite '#include "cutlass...' directives so they resolve against
                        # the configured CUTLASS dependency root; copy all other lines verbatim.
                        if line.startswith("#include \"cutlass"):
                            new_line = "#include \"" + self.cutlass_deps_root + line[len("#include \""):]
                            output_lines.append(new_line)
                        else:
                            output_lines.append(line)
                with open(output_dirs + "/" + f, "w+") as dest_file:
                    dest_file.writelines(output_lines)
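# Illustrative usage sketch (the paths below are hypothetical, chosen only for this example):
#
#   fixer = replace_fix_impl(src_dir="fixed_impl",
#                            dst_dir="out/fixed_impl",
#                            cutlass_deps_root="../../../include/")
#   fixer.gen_code()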
| examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py",
"repo_id": "examples",
"token_count": 1202
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing elementwise operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cute/tensor.hpp"
#include "cute/numeric/numeric_types.hpp"
#include "gather_tensor.hpp"
namespace cutlass::epilogue::collective {
/// Applies an element wise operation to all elements within the fragment
/// and scatter-writes them out to destination storage.
/// GatherC and ScatterD are types of user-defined functions that apply a
/// transformation to the strided coordinate (e.g. a lookup through an index array).
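///
/// A minimal sketch of what such a functor can look like (illustrative only; the
/// accompanying gather_tensor.hpp in this example defines functors along these lines):
///
///   struct IndexedGather {
///     int const* indices;
///     CUTE_HOST_DEVICE int operator()(int i) const { return indices[i]; }
///   };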
template <
class StrideC_,
class StrideD_,
class ThreadEpilogueOp_,
class EpilogueSchedule_,
class GatherC_,
class ScatterD_
>
class EpilogueGatherScatter {
public:
//
// Type Aliases
//
using EpilogueSchedule = EpilogueSchedule_;
// derived types of output thread level operator
using ThreadEpilogueOp = ThreadEpilogueOp_;
using ElementOutput = typename ThreadEpilogueOp::ElementOutput;
using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator;
using ElementCompute = typename ThreadEpilogueOp::ElementCompute;
using ElementScalar = ElementCompute;
using ElementC = typename ThreadEpilogueOp::ElementC;
using StrideC = StrideC_;
using ElementD = typename ThreadEpilogueOp::ElementD;
using StrideD = StrideD_;
// Every epilogue needs these two GmemTiledCopy{C,D} aliases.
// If you don't know what they should be, just use void.
using GmemTiledCopyC = void;
using GmemTiledCopyD = void;
using GatherC = GatherC_;
using ScatterD = ScatterD_;
static const int kOutputAlignment = ThreadEpilogueOp::kCount;
using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type;
static_assert(cute::rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]");
static_assert(cute::rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]");
struct SharedStorage { };
// Host side epilogue arguments
struct Arguments {
typename ThreadEpilogueOp::Params thread_params{};
ElementC const* ptr_C = nullptr;
StrideC dC{};
ElementD* ptr_D = nullptr;
StrideD dD{};
GatherC gather_C{};
ScatterD scatter_D{};
};
// Device side epilogue params
using Params = Arguments;
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& _,
Arguments const& args,
[[maybe_unused]] void* workspace) {
return args;
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
[[maybe_unused]] ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
return true;
}
CUTLASS_HOST_DEVICE
EpilogueGatherScatter(Params const& params_) : params(params_) { }
template<
class ProblemShapeMNKL,
class BlockShapeMNK,
class BlockCoordMNKL,
class FrgEngine, class FrgLayout,
class TiledMma,
class ResidueMNK
>
CUTLASS_DEVICE void
operator()(
ProblemShapeMNKL problem_shape_mnkl,
BlockShapeMNK blk_shape_MNK,
BlockCoordMNKL blk_coord_mnkl,
cute::Tensor<FrgEngine, FrgLayout> const& accumulators,
TiledMma tiled_mma,
ResidueMNK residue_mnk,
int thread_idx,
char* smem_buf)
{
using namespace cute;
using X = Underscore;
static_assert(cute::rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4");
static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static");
static_assert(cute::rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3");
    static_assert(cute::rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 4");
(void) smem_buf;
ThreadEpilogueOp epilogue_op{params.thread_params};
// Separate out problem shape for convenience
auto M = get<0>(problem_shape_mnkl);
auto N = get<1>(problem_shape_mnkl);
auto L = get<3>(problem_shape_mnkl);
auto stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC);
auto stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD);
// Represent the full output tensor
Tensor mC_mnl = make_gather_tensor(make_gmem_ptr(params.ptr_C), make_shape(M,N,L), stride_c, params.gather_C); // (m,n,l)
Tensor mD_mnl = make_gather_tensor(make_gmem_ptr(params.ptr_D), make_shape(M,N,L), stride_d, params.scatter_D); // (m,n,l)
Tensor gC_mnl = local_tile(mC_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l)
// Slice to get the tile this CTA is responsible for
auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl;
Tensor gC = gC_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
Tensor gD = gD_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N)
// Partition source and destination tiles to match the accumulator partitioning
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N)
Tensor tCgC = thr_mma.partition_C(gC); // (VEC,THR_M,THR_N)
static_assert(is_static<FrgLayout>::value, "Accumulator layout must be static");
CUTE_STATIC_ASSERT_V(size(tCgC) == size(tCgD),
"Source and destination must have the same number of elements.");
CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators),
"Accumulator count must have the same destination element count.");
// Make an identity coordinate tensor for predicating our output MN tile
auto cD = make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD))));
Tensor tCcD = thr_mma.partition_C(cD);
// source is needed
if (epilogue_op.is_source_needed()) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
tCgD(i) = epilogue_op(accumulators(i), tCgC(i));
}
}
}
// source is not needed, avoid load
else {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < size(accumulators); ++i) {
if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) {
tCgD(i) = epilogue_op(accumulators(i));
}
}
}
}
private:
Params params;
};
} // namespace cutlass::epilogue::collective
| examples/52_hopper_gather_scatter_fusion/scatter_epilogue.hpp/0 | {
"file_path": "examples/52_hopper_gather_scatter_fusion/scatter_epilogue.hpp",
"repo_id": "examples",
"token_count": 3297
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
template <class ProblemShape, class CtaTiler,
class TA, class AStride, class ASmemLayout, class TiledCopyA,
class TB, class BStride, class BSmemLayout, class TiledCopyB,
class TC, class CStride, class CSmemLayout, class TiledMma,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(TiledMma{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a,
TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b,
TC * C, CStride dC, CSmemLayout , TiledMma mma,
Alpha alpha, Beta beta)
{
using namespace cute;
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads
CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads
static_assert(is_static<ASmemLayout>::value);
static_assert(is_static<BSmemLayout>::value);
static_assert(is_static<CSmemLayout>::value);
CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK
CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K)
Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K)
Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory buffers
__shared__ TA smemA[cosize_v<ASmemLayout>];
__shared__ TB smemB[cosize_v<BSmemLayout>];
Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K,PIPE)
//
// Partition the copying of A and B tiles across the threads
//
ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x);
Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k)
Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K,PIPE)
ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x);
Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k)
Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K,PIPE)
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K
//
// PREFETCH
//
auto K_PIPE_MAX = size<3>(tAsA);
// Total count of tiles
int k_tile_count = size<3>(tAgA);
// Current tile index in gmem to read from
int k_tile_next = 0;
// Start async loads for all pipes but the last
CUTE_UNROLL
for (int k_pipe = 0; k_pipe < K_PIPE_MAX-1; ++k_pipe) {
copy(copy_a, tAgA(_,_,_,k_tile_next), tAsA(_,_,_,k_pipe));
copy(copy_b, tBgB(_,_,_,k_tile_next), tBsB(_,_,_,k_pipe));
cp_async_fence();
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_next; }
}
//
// Define A/B partitioning and C accumulators
//
ThrMMA thr_mma = mma.get_slice(threadIdx.x);
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N)
// Allocate registers for pipelining
Tensor tCrA = thr_mma.make_fragment_A(tCsA(_,_,_,0)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(tCsB(_,_,_,0)); // (MMA,MMA_N,MMA_K)
// Allocate the accumulators -- same size as the projected data
Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V( shape(tCrA) == shape(tCsA)); // (MMA,MMA_M,MMA_K)
CUTE_STATIC_ASSERT_V( shape(tCrB) == shape(tCsB)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V( shape(tCrC) == shape(tCgC)); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V(size<1>(tCgC) == size<1>(tCsA)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(tCgC) == size<1>(tCsB)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // MMA_K
// Clear the accumulators
clear(tCrC);
#if 0
if(thread0()) {
print(" mA : "); print( mA); print("\n");
print(" gA : "); print( gA); print("\n");
print(" sA : "); print( sA); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mB : "); print( mB); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sB : "); print( sB); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tBsB : "); print(tBsB); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mC : "); print( mC); print("\n");
print(" gC : "); print( gC); print("\n");
print("tCsA : "); print(tCsA); print("\n");
print("tCsB : "); print(tCsB); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print("tCrA : "); print(tCrA); print("\n");
print("tCrB : "); print(tCrB); print("\n");
print("tCrC : "); print(tCrC); print("\n");
}
#endif
#if 1
// Current pipe index in smem to read from
int smem_pipe_read = 0;
// Current pipe index in smem to write to
int smem_pipe_write = K_PIPE_MAX-1;
// Pipe slice
Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read);
Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Size of the register pipeline
auto K_BLOCK_MAX = size<2>(tCrA);
// PREFETCH register pipeline
if (K_BLOCK_MAX > 1) {
// Wait until our first prefetched tile is loaded in
cp_async_wait<K_PIPE_MAX-2>();
__syncthreads();
// Prefetch the first rmem from the first k-tile
copy(tCsA_p(_,_,Int<0>{}), tCrA(_,_,Int<0>{}));
copy(tCsB_p(_,_,Int<0>{}), tCrB(_,_,Int<0>{}));
}
//
// PIPELINED MAIN LOOP
// TUTORIAL: Example of a gemm loop that pipelines shared memory using SM80's cp.async instructions
// and explicit pipelines in shared memory.
// Data is read from global(k_tile_next) to shared(smem_pipe_write).
// Data is read from shared(smem_pipe_read) to registers(k_block_next).
// Data is computed on registers(b_block).
//
// This allows all copies and compute to overlap:
// Copy from gmem->smem can overlap with copies from smem->rmem and compute on rmem.
// Copy from smem->rmem can overlap with compute on rmem.
//
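  //           Concretely, with the 3-stage smem pipeline configured below (bP = 3): while FMAs
  //           consume the register slices of the current k-tile, one smem stage is being read
  //           into registers and another smem stage is being filled from gmem for a later k-tile.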
CUTE_NO_UNROLL
while (k_tile_count > -(K_PIPE_MAX-1))
{
CUTE_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Slice the smem_pipe_read smem
tCsA_p = tCsA(_,_,_,smem_pipe_read);
tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Commit the smem for smem_pipe_read
cp_async_wait<K_PIPE_MAX-2>();
__syncthreads();
}
// Load A, B shmem->regs for k_block+1
auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(tCsA_p(_,_,k_block_next), tCrA(_,_,k_block_next));
copy(tCsB_p(_,_,k_block_next), tCrB(_,_,k_block_next));
// Copy gmem to smem before computing gemm on each k-pipe
if (k_block == 0)
{
copy(copy_a, tAgA(_,_,_,k_tile_next), tAsA(_,_,_,smem_pipe_write));
copy(copy_b, tBgB(_,_,_,k_tile_next), tBsB(_,_,_,smem_pipe_write));
cp_async_fence();
// Advance the gmem tile
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_next; }
// Advance the smem pipe
smem_pipe_write = smem_pipe_read;
++smem_pipe_read;
smem_pipe_read = (smem_pipe_read == K_PIPE_MAX) ? 0 : smem_pipe_read;
}
// Thread-level register gemm for k_block
gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
}
}
#endif
//
// Epilogue
//
axpby(alpha, tCrC, beta, tCgC);
}
// Setup params for a NT GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define NT strides (mixed)
auto dA = make_stride(Int<1>{}, ldA); // (dM, dK)
auto dB = make_stride(Int<1>{}, ldB); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
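  // In this NT convention, A is an (M,K) matrix with unit stride in M (m-major) and B is an
  // (N,K) matrix with unit stride in N (n-major); in column-major BLAS terms this computes
  // C = A * B^T.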
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
auto bP = Int<3>{}; // Pipeline
// Define the smem layouts (static)
auto sA = make_layout(make_shape(bM, bK, bP)); // (m,k,p) -> smem_idx; m-major
auto sB = make_layout(make_shape(bN, bK, bP)); // (n,k,p) -> smem_idx; n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major
// Define the thread layouts (static)
TiledCopy copyA = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, TA>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major
Layout<Shape< _4,_1>>{});// Val layout 4x1 m-major
TiledCopy copyB = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, TB>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major
Layout<Shape< _4,_1>>{});// Val layout 4x1 n-major
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
// Setup params for a TN GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define TN strides (mixed)
auto dA = make_stride(ldA, Int<1>{}); // (dM, dK)
auto dB = make_stride(ldB, Int<1>{}); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
auto bP = Int<3>{}; // Pipeline
// Define the smem layouts (static)
auto sA_atom = make_layout(make_shape ( bM, bK),
make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major
auto sB_atom = make_layout(make_shape ( bN, bK),
make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major
auto sA = tile_to_shape(sA_atom, make_shape(bM, bK, bP));
  auto sB = tile_to_shape(sB_atom, make_shape(bN, bK, bP));
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx
// Define the thread layouts (static)
TiledCopy copyA = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<TA>, TA>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledCopy copyB = make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<TB>, TB>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
if (transA == 'N' && transB == 'T') {
return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
} else
if (transA == 'T' && transB == 'N') {
return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
}
assert(false && "Not implemented");
}
int main(int argc, char** argv)
{
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 8) {
std::cout << "This example requires an Ampere GPU or newer (CC >= 80)" << std::endl;
// Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits.
return 0;
}
int m = 5120;
if (argc >= 2)
sscanf(argv[1], "%d", &m);
int n = 5120;
if (argc >= 3)
sscanf(argv[2], "%d", &n);
int k = 4096;
if (argc >= 4)
sscanf(argv[3], "%d", &k);
char transA = 'N';
if (argc >= 5)
sscanf(argv[4], "%c", &transA);
char transB = 'T';
if (argc >= 6)
sscanf(argv[5], "%c", &transB);
using TA = float;
using TB = float;
using TC = float;
using TI = float;
TI alpha = 1.0;
TI beta = 0.0;
std::cout << "M = " << m << std::endl;
std::cout << "N = " << n << std::endl;
std::cout << "K = " << k << std::endl;
std::cout << "C = A^" << transA << " B^" << transB << std::endl;
thrust::host_vector<TA> h_A(m*k);
thrust::host_vector<TB> h_B(n*k);
thrust::host_vector<TC> h_C(m*n);
for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1);
thrust::device_vector<TA> d_A = h_A;
thrust::device_vector<TB> d_B = h_B;
thrust::device_vector<TC> d_C = h_C;
double gflops = (2.0*m*n*k) * 1e-9;
const int timing_iterations = 100;
GPU_Clock timer;
int ldA = 0, ldB = 0, ldC = m;
if (transA == 'N') {
ldA = m;
} else if (transA == 'T') {
ldA = k;
} else {
assert(false);
}
if (transB == 'N') {
ldB = k;
} else if (transB == 'T') {
ldB = n;
} else {
assert(false);
}
// Run once
d_C = h_C;
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
CUTE_CHECK_LAST();
thrust::host_vector<TC> cute_result = d_C;
// Timing iterations
timer.start();
for (int i = 0; i < timing_iterations; ++i) {
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
}
double cute_time = timer.seconds() / timing_iterations;
CUTE_CHECK_LAST();
printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
return 0;
}
| examples/cute/tutorial/sgemm_sm80.cu/0 | {
"file_path": "examples/cute/tutorial/sgemm_sm80.cu",
"repo_id": "examples",
"token_count": 9827
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
#include <cute/numeric/complex.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
# define CUTE_ARCH_MMA_SM80_ENABLED
#if (__CUDA_ARCH__ <= 900)
#define CUTE_ARCH_MMA_B1_AND_SM80_ENABLED
#endif
#if (__CUDA_ARCH__ <= 890)
#define CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED
#endif
#endif
namespace cute {
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x8 TN
struct SM80_16x8x8_F16F16F16F16_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 "
"{%0, %1},"
"{%2, %3},"
"{%4},"
"{%5, %6};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
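// Usage note (illustrative sketch): these structs are thin wrappers over a single mma.sync
// PTX instruction and are normally consumed through CuTe's tiled-MMA machinery rather than
// called directly, e.g. (the atom layout values below are arbitrary example choices)
//
//   TiledMMA mma = make_tiled_mma(SM80_16x8x8_F16F16F16F16_TN{},
//                                 Layout<Shape<_2,_2,_1>>{});
//
// followed by cute::gemm(mma, tCrA, tCrB, tCrC) on partitioned register tensors.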
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_F16F16F16F16_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 "
"{%0, %1},"
"{%2, %3, %4, %5},"
"{%6, %7},"
"{%8, %9};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x8 TN
struct SM80_16x8x8_F32F16F16F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
float const & c0, float const & c1, float const & c2, float const & c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_F32F16F16F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
float const & c0, float const & c1, float const & c2, float const & c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x8 TN
struct SM80_16x8x8_F32BF16BF16F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
float const & c0, float const & c1, float const & c2, float const & c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.bf16.bf16.f32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_F32BF16BF16F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
float const & c0, float const & c1, float const & c2, float const & c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x4 TN
struct SM80_16x8x4_F32TF32TF32F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
float const & c0, float const & c1, float const & c2, float const & c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k4.row.col.f32.tf32.tf32.f32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x4_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x8 TN
struct SM80_16x8x8_F32TF32TF32F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
float const & c0, float const & c1, float const & c2, float const & c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x4 TN
struct SM80_8x8x4_F64F64F64F64_TN
{
using DRegisters = double[2];
using ARegisters = double[1];
using BRegisters = double[1];
using CRegisters = double[2];
CUTE_HOST_DEVICE static void
fma(double & d0, double & d1,
double const& a0,
double const& b0,
double const& c0, double const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=d"(d0), "=d"(d1)
: "d"(a0),
"d"(b0),
"d"(c0), "d"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x4_F64F64F64F64_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
// MMA 8x8x4 TN with Planar Complex multiplication
struct SM80_8x8x4_C64C64C64C64_TN
{
using DRegisters = complex<double>[2];
using ARegisters = complex<double>[1];
using BRegisters = complex<double>[1];
using CRegisters = complex<double>[2];
CUTE_HOST_DEVICE static void
fma(complex<double> & d0, complex<double> & d1,
complex<double> const& a0,
complex<double> const& b0,
complex<double> const& c0, complex<double> const& c1)
{
// Because thrust::complex does not provide a mutable ref
double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0];
double& id0 = reinterpret_cast<double(&)[2]>(d0)[1];
double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0];
double& id1 = reinterpret_cast<double(&)[2]>(d1)[1];
// d.real() = a.real() * b.real() + c.real();
SM80_8x8x4_F64F64F64F64_TN::fma(
rd0, rd1,
a0.real(),
b0.real(),
c0.real(), c1.real());
// d.imag() = a.imag() * b.real() + c.imag();
SM80_8x8x4_F64F64F64F64_TN::fma(
id0, id1,
a0.imag(),
b0.real(),
c0.imag(), c1.imag());
// d.real() = -a.imag() * b.imag() + d.real();
SM80_8x8x4_F64F64F64F64_TN::fma(
rd0, rd1,
-a0.imag(),
b0.imag(),
d0.real(), d1.real());
// d.imag() = a.real() * b.imag() + d.imag();
SM80_8x8x4_F64F64F64F64_TN::fma(
id0, id1,
a0.real(),
b0.imag(),
d0.imag(), d1.imag());
}
};
// MMA 8x8x4 TN with Gaussian Complex multiplication:
// (a + bi)*(c + di)
// yields
// t0 += a*c
// t1 += b*d
// t2 += (a+b)*(c+d)
// then
// re = t0 - t1
// im = t2 - t0 - t1
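//
//   Worked check (illustrative): for (1 + 2i) * (3 + 4i):
//     t0 = 1*3 = 3,   t1 = 2*4 = 8,   t2 = (1+2)*(3+4) = 21
//     re = t0 - t1      =  3 - 8     = -5
//     im = t2 - t0 - t1 = 21 - 3 - 8 = 10
//   which matches (1 + 2i)*(3 + 4i) = -5 + 10i, using three real multiplies instead of four.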
struct SM80_8x8x4_GC64C64C64GC64_TN
{
struct GaussComplex {
double t0, t1, t2;
CUTE_HOST_DEVICE //constexpr
operator complex<double>() const { return complex<double>(t0 - t1, t2 - t0 - t1); }
CUTE_HOST_DEVICE friend //constexpr
complex<double> operator*(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) * b; }
CUTE_HOST_DEVICE friend //constexpr
complex<double> operator*(complex<double> const& a, GaussComplex const& b) { return b * a; }
CUTE_HOST_DEVICE friend //constexpr
complex<double> operator+(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) + b; }
CUTE_HOST_DEVICE friend //constexpr
complex<double> operator+(complex<double> const& a, GaussComplex const& b) { return b + a; }
};
using DRegisters = GaussComplex[2];
using ARegisters = complex<double>[1];
using BRegisters = complex<double>[1];
using CRegisters = GaussComplex[2];
CUTE_HOST_DEVICE static void
fma(GaussComplex & d0, GaussComplex & d1,
complex<double> const& a0,
complex<double> const& b0,
GaussComplex const& c0, GaussComplex const& c1)
{
SM80_8x8x4_F64F64F64F64_TN::fma(d0.t0, d1.t0,
a0.real(),
b0.real(),
c0.t0, c1.t0);
SM80_8x8x4_F64F64F64F64_TN::fma(d0.t1, d1.t1,
a0.imag(),
b0.imag(),
c0.t1, c1.t1);
SM80_8x8x4_F64F64F64F64_TN::fma(d0.t2, d1.t2,
a0.real() + a0.imag(),
b0.real() + b0.imag(),
c0.t2, c1.t2);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32S8S8S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
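// Note on operand packing for the integer MMAs below (illustrative, not a
// per-thread fragment specification): each uint32_t A/B register carries packed
// sub-word values -- four 8-bit values for s8/u8, eight 4-bit values for s4/u4.
// In practice these registers come from a TiledMMA/MMA_Traits partitioning of a
// tensor rather than manual packing; conceptually, though, one register might be
//   uint32_t a0 = uint32_t(uint8_t(x0))
//               | uint32_t(uint8_t(x1)) << 8
//               | uint32_t(uint8_t(x2)) << 16
//               | uint32_t(uint8_t(x3)) << 24;   // x0..x3: hypothetical int8_t lanes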
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32S8S8S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32S8S8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32S8S8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S8S8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S8S8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32S8U8S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32S8U8S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32S8U8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32S8U8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S8U8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S8U8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32U8S8S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32U8S8S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32U8S8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32U8S8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U8S8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U8S8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32U8U8S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x16 TN
struct SM80_8x8x16_S32U8U8S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32U8U8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM80_16x8x16_S32U8U8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U8U8S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U8U8S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32S4S4S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
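// As above, but with 4-bit operands: each uint32_t A/B register now carries eight
// packed s4/u4 values, matching the deeper K=32 (and K=64 below) reduction per MMA.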
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32S4S4S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S4S4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S4S4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32S4S4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32S4S4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32S4U4S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32S4U4S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S4U4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32S4U4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32S4U4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32S4U4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32U4S4S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32U4S4S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U4S4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U4S4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32U4S4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32U4S4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32U4U4S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x32 TN
struct SM80_8x8x32_S32U4U4S32_TN_SATURATE
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32.satfinite "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U4U4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x32 TN
struct SM80_16x8x32_S32U4U4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32U4U4S32_TN
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x64 TN
struct SM80_16x8x64_S32U4U4S32_TN_SATURATE
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 8x8x128 TN
struct SM80_8x8x128_S32U1U1S32_TN_XORPOPC
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m8n8k128.row.col.s32.b1.b1.s32.xor.popc "
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
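// Semantics note for the binary (b1) MMAs with .xor.popc (illustrative scalar model,
// per the PTX instruction name): each 32-bit accumulator effectively computes
//   d = c + popcount(a XOR b)
// over its K single-bit products, i.e. the "multiply" is XOR and the reduction is a
// population count rather than an integer dot product.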
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x128 TN
struct SM80_16x8x128_S32U1U1S32_TN_XORPOPC
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k128.row.col.s32.b1.b1.s32.xor.popc "
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x256 TN
struct SM80_16x8x256_S32U1U1S32_TN_XORPOPC
{
using DRegisters = uint32_t[4];
using ARegisters = uint32_t[4];
using BRegisters = uint32_t[2];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint32_t const& b0, uint32_t const& b1,
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
{
#if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"r"(b0), "r"(b1),
"r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x256_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cute
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/underscore.hpp>
#include <cute/int_tuple.hpp>
#include <cute/stride.hpp>
#include <cute/numeric/arithmetic_tuple.hpp>
#include <cute/numeric/integral_ratio.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
// Aliases
template <class... Shapes>
using Shape = cute::tuple<Shapes...>;
template <class... Strides>
using Stride = cute::tuple<Strides...>;
template <class... Strides>
using Step = cute::tuple<Strides...>;
template <class... Coords>
using Coord = cute::tuple<Coords...>;
template <class... Layouts>
using Tile = cute::tuple<Layouts...>;
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Shape<Ts...>
make_shape(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Stride<Ts...>
make_stride(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Step<Ts...>
make_step(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Coord<Ts...>
make_coord(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Tile<Ts...>
make_tile(Ts const&... t)
{
return {t...};
}
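// Example (illustrative) of the factory functions above:
//   auto shape  = make_shape (Int<4>{}, 8);         // (_4,8)  -- mixed static/dynamic
//   auto stride = make_stride(Int<1>{}, Int<4>{});  // (_1,_4)
//   auto coord  = make_coord (2, 3);                // (2,3)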
//
// Layout
//
template <class Shape, class Stride = LayoutLeft::Apply<Shape> >
struct Layout
: private cute::tuple<Shape, Stride> // EBO for static layouts
{
// Expensive in compilation time...
//static_assert(is_congruent<Shape, Stride>::value, "Shape and Stride must be congruent");
// NOTE: This defaults static Shapes/Strides correctly, but not dynamic
CUTE_HOST_DEVICE constexpr
Layout(Shape const& shape = {}, Stride const& stride = {})
: cute::tuple<Shape, Stride>(shape, stride)
{}
//
// Accessors
//
static constexpr int rank = rank_v<Shape>;
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout() {
return *this;
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout() const {
return *this;
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape() {
return get<0,I...>(static_cast<cute::tuple<Shape, Stride>&>(*this));
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape() const {
return get<0,I...>(static_cast<cute::tuple<Shape, Stride> const&>(*this));
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride() {
return get<1,I...>(static_cast<cute::tuple<Shape, Stride>&>(*this));
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride() const {
return get<1,I...>(static_cast<cute::tuple<Shape, Stride> const&>(*this));
}
//
// Mappings
//
// Map a logical coordinate to a linear index (Coord has no Underscore slice operators)
// OR
// Slice the layout and return the sublayout (Coord has an Underscore slice op)
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
operator()(Coord const& coord) const {
if constexpr (has_underscore<Coord>::value) {
return slice(coord, *this);
} else {
return crd2idx(coord, shape(), stride());
}
CUTE_GCC_UNREACHABLE;
}
// Convenience function for multi-dimensional coordinates
template <class Coord0, class Coord1, class... Coords>
CUTE_HOST_DEVICE constexpr
auto
operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const {
return operator()(make_coord(c0,c1,cs...));
}
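// Example (illustrative), for the default column-major Layout<Shape<_4,_8>>{},
// i.e. (_4,_8):(_1,_4):
//   layout(2,3)  == 2*1 + 3*4 == 14   // coordinate -> linear index
//   layout(2,_)                       // Underscore -> slice(), yields the sublayout of the free mode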
//
// Compose
//
template <class OtherLayout>
CUTE_HOST_DEVICE constexpr
auto
compose(OtherLayout const& other) const {
return composition(*this, other);
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
compose(Layouts const&... layouts) const {
return composition(*this, make_tile(layouts...));
}
template <class OtherShape>
CUTE_HOST_DEVICE constexpr
auto
with_shape(OtherShape const& shape) const {
return composition(*this, make_layout(shape));
}
template <class... Shapes>
CUTE_HOST_DEVICE constexpr
auto
with_shape(Shapes const&... shapes) const {
return composition(*this, make_layout(make_shape(shapes...)));
}
//
// Tile
//
template <class OtherLayout>
CUTE_HOST_DEVICE constexpr
auto
tile(OtherLayout const& other) const {
return tiled_divide(*this, other);
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
tile(Layouts const&... layouts) const {
return tiled_divide(*this, make_tile(layouts...));
}
//
// Utility
//
//
// Index to Coordinate
//
// NOTE: Only valid for compact layouts
// Return the (hierarchical) ND logical coordinate corresponding to the linear index
// @post crd2idx(@a result, shape(), stride()) == idx
// @post congruent(@a result, shape())
template <class IInt,
__CUTE_REQUIRES(is_integral<IInt>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_hier_coord(IInt const& idx) const {
return cute::idx2crd(idx, shape(), stride());
}
// Return the (flat) ND logical coordinate corresponding to the linear index
// @post crd2idx(@a result, shape(), stride()) == idx
// @post rank(@a result) == rank(shape()) && depth(@a result) == 1
template <class IInt,
__CUTE_REQUIRES(is_integral<IInt>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_flat_coord(IInt const& idx) const {
return cute::crd2crd(this->get_hier_coord(idx), shape(), repeat<rank>(Int<1>{}));
}
// Return the generalized column-major 1D logical coordinate corresponding to the linear index
// @post crd2idx(@a result, shape(), stride()) == idx
// @post is_integral<decltype(@a result)>::value
template <class IInt,
__CUTE_REQUIRES(is_integral<IInt>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_1d_coord(IInt const& idx) const {
return cute::crd2idx(this->get_hier_coord(idx), shape());
}
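// Example (illustrative), for the compact layout (4,8):(_1,4):
//   get_hier_coord(10) -> (2,2)    since 2*1 + 2*4 == 10
//   get_flat_coord(10) -> (2,2)    (already rank-2 and depth-1)
//   get_1d_coord(10)   -> 10       (column-major linearization of (2,2) in shape (4,8))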
//
// Coordinate to Coordinate
//
#if 0
// Return the (hierarchical) ND logical coordinate corresponding to the linear index
// @post congruent(@a result, shape())
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
crd_2_hier_coord(Coord const& crd) const {
return cute::crd2crd(crd, shape(), shape());
}
// Return the (flat) ND logical coordinate corresponding to the linear index
// @post rank(@a result) == rank(shape()) && depth(@a result) == 1
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
crd_2_flat_coord(Coord const& crd) const {
return cute::crd2crd(crd, shape(), product_each(shape()));
}
// Return the generalized column-major 1D logical coordinate corresponding to the linear index
// @post is_integral<decltype(@a result)>::value
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
crd_2_1d_coord(Coord const& crd) const {
//return cute::crd2crd(crd, shape(), product(shape()));
return cute::crd2idx(crd, shape());
}
#endif
};
// Equality, return a static or dynamic boolean
template <class ShapeA, class StrideA,
class ShapeB, class StrideB>
CUTE_HOST_DEVICE constexpr
auto
operator==(Layout<ShapeA,StrideA> const& layoutA, Layout<ShapeB,StrideB> const& layoutB)
{
return layoutA.shape() == layoutB.shape() && layoutA.stride() == layoutB.stride();
}
template <class Layout>
struct is_layout : false_type {};
template <class Shape, class Stride>
struct is_layout<Layout<Shape,Stride>> : true_type {};
//
// Layout construction
//
template <class Shape, class Stride,
__CUTE_REQUIRES((is_tuple<Shape >::value || is_integral<Shape >::value) &&
(is_tuple<Stride>::value || is_integral<Stride>::value))>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape, Stride const& stride)
{
return Layout<Shape,Stride>(shape, stride);
}
template <class Shape,
__CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape)
{
return make_layout(shape, compact_col_major(shape));
}
// Construct a layout from multiple layouts by
// concatenating each layout as an independent mode
template <class... Shapes, class... Strides>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Layout<Shapes,Strides> const&... layouts)
{
return make_layout(make_shape (layouts.shape()...),
make_stride(layouts.stride()...));
}
//
// Convenience tags for common layouts
//
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape, GenColMajor)
{
return make_layout(shape, compact_col_major(shape));
}
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape, GenRowMajor)
{
return make_layout(shape, compact_row_major(shape));
}
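// Example (illustrative):
//   make_layout(make_shape(3,4), GenColMajor{})  ->  (3,4):(_1,3)
//   make_layout(make_shape(3,4), GenRowMajor{})  ->  (3,4):(4,_1)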
//
// Advanced Layout constructions
//
// Make a compact layout with shape @a shape and strides following the order induced by @a order.
// Dynamic values in @a order are ignored; they are treated as larger than any static value and are ordered among themselves from left to right.
// Example:
// make_ordered_layout(Shape<_2,_2,_2,_2>{}, Step<_0,_2,_3,_1>{})
// -> (_2,_2,_2,_2):(_1,_4,_8,_2)
// make_ordered_layout(make_shape(2,3,4,5), make_step(Int<2>{}, 67, 42, Int<50>{}))
// -> (2,3,4,5):(_1,10,30,2)
template <class Shape, class Order>
CUTE_HOST_DEVICE constexpr
auto
make_ordered_layout(Shape const& shape, Order const& order)
{
return make_layout(shape, compact_order(shape, order));
}
// Make a compact layout with the same shape as @a layout
// and strides following the order induced by @a layout.stride().
// Static-0 strides in the input @a layout are preserved in the output.
// Example:
// make_layout_like(Layout<Shape<_2,_2,_2,_2>, Stride<_0,_2,_4,_1>>{})
// -> (_2,_2,_2,_2):(_0,_2,_4,_1)
// make_layout_like(make_layout(make_shape(2,3,4,5), make_stride(Int<0>{},42,Int<1>{},Int<0>{})))
// -> (2,3,4,5):(_0,4,_1,_0)
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
make_layout_like(Layout<Shape,Stride> const& layout)
{
return make_layout(layout.shape(),
compact_order(filter_zeros(layout.stride(), layout.shape()), layout.stride()));
}
// Make a compact layout with the same shape as @a layout
// and strides following the order induced by @a layout.stride(),
// except mode-0 is always stride-1 and generated column-major.
// The 0th mode is commonly used for MMA_Atoms or Copy_Atoms so this
// generates the 0th mode with LayoutLeft (preserving stride-0s) regardless of the reference layout
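// Example (illustrative):
//   make_fragment_like(Layout<Shape<_4,_8>, Stride<_8,_1>>{})
//     -> a compact layout with extents (4,8) whose 0th mode is column-major with stride _1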
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Layout<Shape,Stride> const& layout)
{
constexpr int R = Layout<Shape,Stride>::rank;
if constexpr (R > 1 && is_static<Shape>::value) {
return tiled_product(make_layout(get<0>(layout.shape()),
compact_col_major(filter_zeros(get<0>(layout.stride()), get<0>(layout.shape())))),
make_ordered_layout(take<1,R>(layout.shape()), take<1,R>(layout.stride())));
} else {
return make_layout(layout.shape());
}
CUTE_GCC_UNREACHABLE;
}
template <class Shape,
__CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Shape const& shape)
{
return make_layout(shape);
}
//
// Make an identity layout that maps a coordinate to itself
//
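// Example:
//   make_identity_layout(make_shape(_4{}, _8{}))
//     -> shape (_4,_8) with basis strides (E<0>,E<1>), so layout(i,j) returns the coordinate (i,j) itself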
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_identity_layout(Shape const& shape)
{
return make_layout(shape, make_basis_like(shape));
}
//
// Operations to manipulate Layouts like a tuple of pairs
//
// Return the Is...th sublayout.
// For Is... = <I0,I1,...,IN>, equivalent to get<IN>(...get<I1>(get<I0>(layout)))
template <size_t... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
get(Layout<Shape,Stride> const& layout)
{
return make_layout(get<Is...>(layout.shape()),
get<Is...>(layout.stride()));
}
// Return a new layout with only the modes in the range [B,E)
template <int B, int E, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
take(Layout<Shape,Stride> const& layout)
{
static_assert(B < E, "take: empty range error");
static_assert(0 <= B && E <= Layout<Shape,Stride>::rank, "take: range out of bounds");
return make_layout(take<B,E>(layout.shape()),
take<B,E>(layout.stride()));
}
// Return a new layout with only the modes Is... = <I0,I1,...,IN>
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
select(Layout<Shape,Stride> const& layout)
{
return make_layout(select<Is...>(layout.shape()),
select<Is...>(layout.stride()));
}
// Return a layout with depth at most 1
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
flatten(Layout<Shape,Stride> const& layout)
{
return make_layout(flatten(layout.shape()),
flatten(layout.stride()));
}
// Return a layout whose profile is congruent to TargetProfile
// @pre Input layout is flat, flatten(@a layout) == @a layout
// @pre Input layout can be folded to profile, rank(@a layout) == rank(flatten(@a target_profile))
// @post congruent(@a result, @a target_profile)
template <class Shape, class Stride, class TargetProfile>
CUTE_HOST_DEVICE constexpr
auto
unflatten(Layout<Shape,Stride> const& layout, TargetProfile const& target_profile)
{
return make_layout(unflatten(layout.shape(), target_profile),
unflatten(layout.stride(), target_profile));
}
//
// Utilities
//
// Return the sublayout of mode I...
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout(Layout<Shape,Stride> const& layout)
{
if constexpr (sizeof...(Is) == 0) {
return layout;
} else {
return get<Is...>(layout);
}
CUTE_GCC_UNREACHABLE;
}
// Return the shape of a mode
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(Layout<Shape,Stride>& layout)
{
return layout.template shape<Is...>();
}
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(Layout<Shape,Stride> const& layout)
{
return layout.template shape<Is...>();
}
// Return the stride of a mode
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(Layout<Shape,Stride>& layout)
{
return layout.template stride<Is...>();
}
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(Layout<Shape,Stride> const& layout)
{
return layout.template stride<Is...>();
}
// Return the number of elements in a mode
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
size(Layout<Shape,Stride> const& layout)
{
return size(shape<Is...>(layout));
}
// Return the number of modes
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
rank(Layout<Shape,Stride> const& layout)
{
return rank(shape<Is...>(layout));
}
// Return the depth of the layout
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
depth(Layout<Shape,Stride> const& layout)
{
return depth(shape<Is...>(layout));
}
// Return the codomain shape of a mode
// @post size(coshape(@a a)) == cosize(@a a)
// @return C Coordinate with smallest elements such that
// @a elem_less(sub_layout(c), C) for all c < size(@a sub_layout)
// where sub_layout = get<Is...>(layout).
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coshape(Layout<Shape,Stride> const& layout)
{
// Protect against negative strides
auto abs_sub_layout = make_layout(shape<Is...>(layout),
transform_leaf(stride<Is...>(layout), abs_fn{}));
auto co_coord = as_arithmetic_tuple(abs_sub_layout(size(abs_sub_layout) - Int<1>{}));
return co_coord + repeat_like(co_coord, Int<1>{});
}
// Return the codomain size of a mode
// @return M smallest integer such that
// @a sub_layout(c) < M for all c < size(@a sub_layout)
// where sub_layout = get<Is...>(layout).
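// Example:
//   cosize(Layout<Shape<_2,_2>, Stride<_4,_1>>{})  -> _6   (the largest generated offset is 4+1 = 5)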
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
cosize(Layout<Shape,Stride> const& layout)
{
return size(coshape<Is...>(layout));
}
template <class Layout>
using cosize_t = decltype(cosize(declval<Layout>()));
template <class Layout>
static constexpr int cosize_v = cosize_t<Layout>::value;
// With crd2idx(coord, shape), makes sense to have crd2idx(coord, Layout) as well
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord const& c, Layout<Shape,Stride> const& layout)
{
return crd2idx(c, layout.shape(), layout.stride());
}
//
// Slice and Dice a layout
//
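// Example:
//   slice(make_coord(_, 1), Layout<Shape<_4,_8>, Stride<_1,_4>>{})  -> (_4):(_1)   (keeps the Underscore modes)
//   dice (make_coord(_, 1), Layout<Shape<_4,_8>, Stride<_1,_4>>{})  -> (_8):(_4)   (keeps the non-Underscore modes)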
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
slice(Coord const& c, Layout<Shape,Stride> const& layout)
{
return make_layout(slice(c, layout.shape()),
slice(c, layout.stride()));
}
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
slice_and_offset(Coord const& c, Layout<Shape,Stride> const& layout)
{
return cute::make_tuple(slice(c, layout), crd2idx(c, layout));
}
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
dice(Coord const& c, Layout<Shape,Stride> const& layout)
{
return make_layout(dice(c, layout.shape()),
dice(c, layout.stride()));
}
// Compute a pointer offset and (potentially modified) layout from a coordinate
// This exists so it can be overloaded for ComposedLayout
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
domain_offset(Coord const& coord, Layout<Shape,Stride> const& layout)
{
return cute::make_tuple(layout, layout(coord));
}
//
// Transform the modes of a layout
//
namespace detail {
template <class Tuple, class F, int... I>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple const& t, F&& f, seq<I...>)
{
return make_layout(f(get<I>(t))...);
}
template <class Tuple0, class Tuple1, class F, int... I, int... I0, int... I1>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple0 const& t0, Tuple1 const& t1, F&& f, seq<I...>, seq<I0...>, seq<I1...>)
{
return make_layout(f(get<I>(t0),get<I>(t1))..., get<I0>(t0)..., get<I1>(t1)...);
}
} // end namespace detail
template <class Tuple, class F>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple const& t, F&& f)
{
return detail::transform_layout(t, f, make_seq<decltype(rank(t))::value>{});
}
template <class Tuple0, class Tuple1, class F>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple0 const& t0, Tuple1 const& t1, F&& f)
{
constexpr int R0 = decltype(rank(t0))::value;
constexpr int R1 = decltype(rank(t1))::value;
constexpr int R = (R0 < R1) ? R0 : R1;
return detail::transform_layout(t0, t1, f, make_seq<R>{}, make_range<R,R0>{}, make_range<R,R1>{});
}
//
// Coalesce and Filter
//
namespace detail {
// Look at each element and the front of the stack (in order of priority)
// front(NewLayout) get<I>(Layout)
// s0:d0 _1:d1 => continue
// _1:d0 s1:d1 => replace_front s1:d1
// s0:s1*d1 s1:d1 => replace_front s0*s1:d1
// s0:d0 s1:d1 => prepend s1:d1
//
// @pre OldShape and OldStride are flat
template <int I, class OldShape, class OldStride, class NewShape, class NewStride>
CUTE_HOST_DEVICE constexpr
auto
bw_coalesce(OldShape const& old_shape, OldStride const& old_stride,
NewShape const& new_shape, NewStride const& new_stride)
{
if constexpr (I == -1) {
// Base case, we're done
if constexpr (is_constant<1, NewShape>::value) {
return Layout<_1,_0>{};
} else {
return Layout<NewShape,NewStride>{new_shape,new_stride};
}
} else if constexpr (is_constant<1, decltype(get<I>(old_shape))>::value) {
// shape<I>(layout) == _1, skip it and continue
return bw_coalesce<I-1>(old_shape, old_stride, new_shape, new_stride);
} else if constexpr (is_constant<1, NewShape>::value) {
// Replace our shape-1 with anything (Can only happen on input new_shape/new_stride)
return bw_coalesce<I-1>(old_shape, old_stride, get<I>(old_shape), get<I>(old_stride));
} else if constexpr (is_static<decltype(get<0>(new_shape))>::value &&
is_constant<true, decltype(get<I>(old_shape) * get<I>(old_stride) == get<0>(new_stride))>::value) {
// Merge modes because the shapes and strides match
return bw_coalesce<I-1>(old_shape, old_stride,
replace_front(new_shape, get<I>(old_shape) * get<0>(new_shape)),
replace_front(new_stride, get<I>(old_stride)));
} else {
// Can't replace or merge, so prepend a new mode
return bw_coalesce<I-1>(old_shape, old_stride,
prepend(new_shape, get<I>(old_shape)),
prepend(new_stride, get<I>(old_stride)));
}
CUTE_GCC_UNREACHABLE;
}
// cute::coalesce promises to not change the Layout as a function from integers to codomain.
// It accomplishes this inside of the Layout's domain, but not always outside of the domain.
// Example: (_4,_1):(_1,_0) coalesces to _4:_1.
// detail::coalesce_x preserves the Layout function inside its domain and outside.
//
// @post depth(@a result) <= 1
// @post for all i, 0 <= i, @a layout(i) == @a result(i)
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coalesce_x(Layout<Shape,Stride> const& layout)
{
auto flat_shape = flatten(layout.shape());
auto flat_stride = flatten(layout.stride());
constexpr int R = decltype(rank(flat_shape))::value;
if constexpr (is_constant<1, decltype(get<R-1>(flat_shape))>::value) {
return detail::bw_coalesce<R-2>(flat_shape, flat_stride, Int<2>{}, get<R-1>(flat_stride));
} else {
return detail::bw_coalesce<R-2>(flat_shape, flat_stride, get<R-1>(flat_shape), get<R-1>(flat_stride));
}
}
// Apply coalesce_x at the terminals of trg_profile
template <class Shape, class Stride, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
coalesce_x(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile)
{
if constexpr (is_tuple<IntTuple>::value) {
static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank);
return cute::transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return coalesce_x(l,t); });
} else {
return coalesce_x(layout);
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// "Simplify" the layout by combining modes that are possible to combine
// Does not respect the shape of the layout, but does preserve total size
// @post size(@a result) == size(@a layout)
// @post depth(@a result) <= 1
// @post for all i, 0 <= i < size(@a layout), @a layout(i) == @a result(i)
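// Example:
//   coalesce(Layout<Shape<_2,Shape<_1,_6>>, Stride<_1,Stride<_6,_2>>>{})  -> _12:_1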
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Layout<Shape,Stride> const& layout)
{
auto flat_shape = flatten(layout.shape());
auto flat_stride = flatten(layout.stride());
constexpr int R = decltype(rank(flat_shape))::value;
return detail::bw_coalesce<R-2>(flat_shape, flat_stride, get<R-1>(flat_shape), get<R-1>(flat_stride));
}
// Apply coalesce at the terminals of trg_profile
template <class Shape, class Stride, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile)
{
if constexpr (is_tuple<IntTuple>::value) {
static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank);
return transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return coalesce(l,t); });
} else {
return coalesce(layout);
}
CUTE_GCC_UNREACHABLE;
}
// Combine static and dynamic modes of a shape.
// @post size(@a result) == size(@a shape)
// @post depth(@a result) <= 1
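// Example:
//   coalesce(make_shape(Int<2>{}, Int<3>{}, 4, 5))  -> (_6,20)   (static run and dynamic run combined separately)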
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Shape const& shape)
{
static_assert(is_integral<Shape>::value || is_tuple<Shape>::value);
return cute::fold_first(flatten(shape), [](auto const& init, auto const& a) {
if constexpr (is_static<decltype(back(init))>::value == is_static<decltype(a)>::value) {
return replace_back(init, back(init) * a); // Both static or both dynamic, coalesce and replace
} else {
return append(init, a); // Can't coalesce, so append
}
});
}
// Replace the modes in layout that have a 0-stride with a 1-size
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(Layout<Shape,Stride> const& layout)
{
return make_layout(filter_zeros(layout.stride(), layout.shape()), layout.stride());
}
// Remove all of the 0-strides and 1-sizes
// Return 1-shape if empty
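// Example:
//   filter(Layout<Shape<_2,_1,_6>, Stride<_0,_4,_2>>{})  -> _6:_2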
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
filter(Layout<Shape,Stride> const& layout)
{
return coalesce(filter_zeros(layout));
}
// Apply filter at the terminals of trg_profile
template <class Shape, class Stride, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
filter(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile)
{
if constexpr (is_tuple<IntTuple>::value) {
static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank);
return transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return filter(l,t); });
} else {
return filter(layout);
}
CUTE_GCC_UNREACHABLE;
}
//
// Append, Prepend, Replace
//
template <int N, class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
append(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(append<N>(layout.shape(), x.shape()),
append<N>(layout.stride(), x.stride()));
}
template <class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
append(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(append(layout.shape(), x.shape()),
append(layout.stride(), x.stride()));
}
template <int N, class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
prepend(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(prepend<N>(layout.shape(), x.shape()),
prepend<N>(layout.stride(), x.stride()));
}
template <class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
prepend(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(prepend(layout.shape(), x.shape()),
prepend(layout.stride(), x.stride()));
}
template <int N, class ShapeA, class StrideA, class ShapeX, class StrideX>
CUTE_HOST_DEVICE constexpr
auto
replace(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x)
{
return make_layout(replace<N>(layout.shape(), x.shape()),
replace<N>(layout.stride(), x.stride()));
}
template <int B, int E, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
group(Layout<Shape,Stride> const& layout)
{
return make_layout(group<B,E>(layout.shape()),
group<B,E>(layout.stride()));
}
//
// Composition of two layouts: lhs o rhs
// @post compatible(rhs, result)
// @post result(c) = lhs(rhs(c))
// for all c in the domain of rhs
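// Example:
//   composition(Layout<Shape<_20,_2>, Stride<_16,_4>>{},
//               Layout<Shape< _4,_5>, Stride< _1,_4>>{})
//     -> (_4,_5):(_16,_64)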
//
namespace detail {
template <class LShape, class LStride,
class RShape, class RStride>
CUTE_HOST_DEVICE constexpr
auto
composition_impl(LShape const& lhs_shape, LStride const& lhs_stride,
RShape const& rhs_shape, RStride const& rhs_stride)
{
if constexpr (is_tuple<RShape>::value) {
// Apply the right-distributivity of Layout composition
return transform_layout(rhs_shape, rhs_stride, [&](auto const& s, auto const& d) {
return composition_impl(lhs_shape, lhs_stride, s, d);
});
} else
if constexpr (is_scaled_basis<RStride>::value) {
// Special case for a ScaledBasis stride
return composition_impl(basis_get(rhs_stride, lhs_shape), basis_get(rhs_stride, lhs_stride),
rhs_shape, basis_value(rhs_stride));
} else
if constexpr (is_constant<0, RStride>::value) {
// Special case shortcut for any static stride-0
return Layout<RShape, RStride>{rhs_shape, rhs_stride};
} else
if constexpr (is_integral<decltype(lhs_shape)>::value) {
// Special case shortcut for any integral LShape
return Layout{rhs_shape, rhs_stride * lhs_stride};
} else
if constexpr (is_constant<1, RStride>::value) {
// Special case shortcut for any static stride-1
constexpr int R = rank_v<LShape>;
auto result_shape_0 = take<0,R-1>(lhs_shape);
// Mod out the rhs_shape from the lhs_shape
auto const [result_shape_1, rest_shape] = fold(result_shape_0, cute::make_tuple(cute::make_tuple(), rhs_shape),
[] (auto const& init, auto const& si) {
return cute::make_tuple(append(get<0>(init), shape_min(abs(si), get<1>(init))), shape_div(get<1>(init), abs(si)));
});
// Jump into coalesce and append (rest_shape, get<R-1>(lhs_stride))
return detail::bw_coalesce<R-2>(result_shape_1, lhs_stride, rest_shape, get<R-1>(lhs_stride));
} else {
// General case: integral RShape and RStride, tuple LShape and LStride
constexpr int R = rank_v<LShape>;
auto result_shape_0 = take<0,R-1>(lhs_shape);
auto result_stride_0 = take<0,R-1>(lhs_stride);
// Divide out the rhs_stride from the lhs_shape
auto const [result_shape_1, rest_stride] = fold(result_shape_0, cute::make_tuple(cute::make_tuple(), rhs_stride),
[] (auto const& init, auto const& di) {
return cute::make_tuple(append(get<0>(init), shape_div(di, get<1>(init))), shape_div(get<1>(init), di));
});
// Apply any lhs_shape changes to the stride
auto result_stride_1 = elem_scale(result_stride_0, shape_div(result_shape_0, result_shape_1));
// Mod out the rhs_shape from the lhs_shape
auto const [result_shape_2, rest_shape] = fold(result_shape_1, cute::make_tuple(cute::make_tuple(), rhs_shape),
[] (auto const& init, auto const& si) {
return cute::make_tuple(append(get<0>(init), shape_min(abs(si), get<1>(init))), shape_div(get<1>(init), abs(si)));
});
// Jump into coalesce and append (rest_shape, rest_stride * get<R-1>(lhs_stride))
return detail::bw_coalesce<R-2>(result_shape_2, result_stride_1, rest_shape, rest_stride * get<R-1>(lhs_stride));
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class LShape, class LStride,
class RShape, class RStride>
CUTE_HOST_DEVICE constexpr
auto
composition(Layout<LShape,LStride> const& lhs,
Layout<RShape,RStride> const& rhs)
{
auto coprofile = repeat_like(decltype(coshape(rhs)){}, Int<0>{});
auto flat_lhs = detail::coalesce_x(lhs, coprofile);
return detail::composition_impl(flat_lhs.shape(), flat_lhs.stride(), rhs.shape(), rhs.stride());
}
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
composition(Layout<LShape,LStride> const& lhs,
Tiler const& rhs)
{
if constexpr (is_tuple<Tiler>::value) {
static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank);
// Drop any modes of lhs that aren't hit by rhs
return detail::transform_layout(lhs, rhs, [](auto const& l, auto const& r) { return composition(l,r); }, make_seq<tuple_size<Tiler>::value>{}, seq<>{}, seq<>{});
} else if constexpr (is_underscore<Tiler>::value) {
return lhs;
} else if constexpr (is_integral<Tiler>::value) {
auto flat_lhs = detail::coalesce_x(lhs);
return detail::composition_impl(flat_lhs.shape(), flat_lhs.stride(), rhs, Int<1>{});
}
CUTE_GCC_UNREACHABLE;
}
//
// Complement
//
// Build the complement of a layout.
// @post size(@a result) >= size(@a cotarget) / size(filter(@a layout))
// @post For all i in [1,size(@a result)),
// @a result(i) < @a result(i-1)
// For all j in [0, size(@a layout)),
// @a result(i) != @a layout(j)
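// Example:
//   complement(Layout<_4,_1>{}, Int<24>{})                        -> _6:_4
//   complement(Layout<Shape<_2,_2>, Stride<_1,_6>>{}, Int<24>{})  -> (_3,_2):(_2,_12)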
//
namespace detail {
// @pre @a layout has been filtered (flattened and no stride-0 or size-1 modes).
template <class Shape, class Stride, class CoTarget>
CUTE_HOST_DEVICE constexpr
auto
complement(Shape const& shape, Stride const& stride, CoTarget const& cotarget)
{
if constexpr (is_constant<0, Stride>::value) {
// Special case for irreducible rank-1 stride-0 layout
return make_layout(coalesce(cotarget));
} else {
// General case
constexpr int R = rank_v<Shape>;
static_assert(R == 1 || is_static<Stride>::value,
"Dynamic-stride complement only for rank-1 layouts");
// Should just be a sort and a fold...
// Then we could even handle dynamic strides (but they would destroy all static strides)
auto [shape_, stride_, result_shape_, result_stride] =
fold(make_seq<R-1>{},
cute::make_tuple(shape, stride, cute::make_tuple(), cute::make_tuple(Int<1>{})),
[](auto const& init, auto i)
{
auto [shape, stride, result_shape, result_stride] = init;
auto min_stride = cute::min(stride);
auto min_idx = cute::find(stride, min_stride);
auto new_shape = min_stride / get<i>(result_stride);
auto new_stride = min_stride * get<min_idx>(shape);
static_assert(not is_constant<0, decltype(new_shape)>::value, "Non-injective Layout detected in complement.");
return cute::make_tuple(remove<min_idx>(shape), // Remove the min_idx from shape
remove<min_idx>(stride), // Remove the min_idx from stride
append(result_shape , new_shape ), // new shape = min_stride / last_stride
append(result_stride, new_stride)); // new stride = min_stride * curr_shape
});
// Append the last shape mode
auto new_shape = get<0>(stride_) / get<R-1>(result_stride); // new shape = min_stride / last_stride
static_assert(not is_constant<0, decltype(new_shape)>::value, "Non-injective Layout detected in complement.");
auto result_shape = append(result_shape_, new_shape);
// Compute the rest_shape and rest_stride
auto new_stride = get<0>(stride_) * get<0>(shape_); // new stride = min_stride * curr_shape
auto rest_shape = coalesce(ceil_div(cotarget, new_stride));
auto rest_stride = compact_col_major(rest_shape, new_stride);
// Coalesce and append (rest_shape, rest_stride)
return coalesce(make_layout(make_shape (result_shape , rest_shape ),
make_stride(result_stride, rest_stride)));
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class Shape, class Stride, class CoTarget>
CUTE_HOST_DEVICE constexpr
auto
complement(Layout<Shape,Stride> const& layout, CoTarget const& cotarget)
{
auto filter_layout = filter(layout);
return detail::complement(filter_layout.shape(), filter_layout.stride(), shape(cotarget));
}
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
complement(Layout<Shape,Stride> const& layout)
{
auto filter_layout = filter(layout);
return detail::complement(filter_layout.shape(), filter_layout.stride(), cosize(filter_layout));
}
//
// Right-Inverse and Left-Inverse
//
namespace detail {
template <int NextStride, class Shape, class Stride, int... Is>
CUTE_HOST_DEVICE constexpr
auto
inverse_seq(Shape const& shape, Stride const& stride, seq<Is...>)
{
auto next_I = cute::find_if(stride, [](auto a) { return is_constant<NextStride, decltype(a)>{}; });
if constexpr (next_I == decltype(rank(stride))::value) {
// If not found, return current seq
return seq<Is...>{};
} else {
// auto next_stride = get<next_I>(shape) * get<next_I>(stride);
// NOTE: Needed for g++-7
using next_stride = decltype(get<next_I>(shape) * get<next_I>(stride));
if constexpr (is_static<next_stride>::value && !is_constant<NextStride, next_stride>::value) {
// If next_stride is static and unique, then continue
return inverse_seq<next_stride::value>(shape, stride, seq<Is..., next_I>{});
} else {
// Else return current seq + next_I
return seq<Is..., next_I>{};
}
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
//
// Build the right-inverse of a layout
// @pre is_static<Layout>
// @result A layout @a result such that
// @a layout(@a result(i)) == i for all i < size(@a result)
// @result A layout @a result such that
// composition(@a layout, @a result) is identical to make_layout(shape(result))
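// Example:
//   right_inverse(Layout<Shape<_4,_2>, Stride<_2,_1>>{})  -> (_2,_4):(_4,_1)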
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
right_inverse(Layout<Shape,Stride> const& layout)
{
auto flat_layout = coalesce(layout);
auto astride = transform_leaf(flat_layout.stride(), abs_fn{});
// Find Int<1>{}, the starting stride, and follow the strides to gen inverse_seq
[[maybe_unused]] auto iseq = detail::inverse_seq<1>(flat_layout.shape(), astride, seq<>{});
if constexpr (iseq.size() == 0) {
return Layout<_1,_0>{}; // Empty case, nothing found
} else {
// Generate the corresponding new strides and construct
auto rstride = compact_col_major(flat_layout.shape());
return make_layout(unwrap(transform(iseq, [&](auto i) { return shape<i>(flat_layout); })),
unwrap(transform(iseq, [&](auto i) { return signum(stride<i>(flat_layout)) * get<i>(rstride); })));
}
CUTE_GCC_UNREACHABLE;
}
CUTE_HOST_DEVICE constexpr
auto
right_inverse(Underscore const& _)
{
return _;
}
//
// Build the left-inverse of a layout
// @pre is_static<Layout>
// @pre @a layout is an injective function
// @result A layout @a result such that
// @a result(@a layout(i)) == i for all i < size(@a layout)
// @result A layout @a result such that
// composition(@a result, @a layout) is identical to make_layout(shape(layout))
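// Example:
//   left_inverse(Layout<_4,_2>{})  -> (_2,_4):(_4,_1)   (maps the offset 2*i back to i)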
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
left_inverse(Layout<Shape,Stride> const& layout)
{
return right_inverse(make_layout(layout, complement(layout)));
}
CUTE_HOST_DEVICE constexpr
auto
left_inverse(Underscore const& _)
{
return _;
}
//
// Max Common Layout
//
/* Return a layout that points to the maximum number of contiguous elements
* that logically correspond in the layouts of @a a and @a b.
*
* @returns Layout R
* @post For all 0 <= i < size(R), a(R(i)) == i and b(R(i)) == i
*/
template <class ShapeA, class StrideA,
class ShapeB, class StrideB>
CUTE_HOST_DEVICE constexpr
auto
max_common_layout(Layout<ShapeA,StrideA> const& a,
Layout<ShapeB,StrideB> const& b)
{
Layout inv_b = right_inverse(b);
Layout common = coalesce(composition(a, inv_b));
// Keep only the static identity component of the common layout
if constexpr (is_static<decltype(shape<0>(common))>::value &&
is_constant<1, decltype(stride<0>(common))>::value) {
// Truncate to the size of the contiguous vector (static stride-1 mode)
return composition(inv_b, layout<0>(common));
} else {
return Layout<_1,_0>{};
}
}
/* Return Int<N> such that N is the maximum number of contiguous elements
* that logically correspond in the layouts of @a a and @a b.
*
* @returns Int<N> with N >= 1
* @post For all 0 <= n < N, a(b.get_1d_coord(n)) == n
* (NOTE: Problems with negative strides/coords in this post-condition)
*/
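// Example:
//   max_common_vector((_4,_8):(_1,_4), (_4,_8):(_1,_4))  -> _32
//   max_common_vector((_4,_8):(_1,_4), (_4,_8):(_8,_1))  -> _1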
template <class ShapeA, class StrideA,
class ShapeB, class StrideB>
CUTE_HOST_DEVICE constexpr
auto
max_common_vector(Layout<ShapeA,StrideA> const& a,
Layout<ShapeB,StrideB> const& b)
{
Layout common = coalesce(composition(a, right_inverse(b)));
// Keep only the static identity component of the common layout
if constexpr (is_static<decltype(shape<0>(common))>::value &&
is_constant<1, decltype(stride<0>(common))>::value) {
// Truncate to the size of the contiguous vector (static stride-1 mode)
return shape<0>(common);
} else {
return Int<1>{};
}
CUTE_GCC_UNREACHABLE;
}
//
// Kernel (Nullspace) of a Layout
//
namespace detail {
template <int NextI, class Stride, int... Is>
CUTE_HOST_DEVICE constexpr
auto
nullspace_seq(Stride const& stride, seq<Is...>)
{
if constexpr (NextI == rank_v<Stride>) {
return seq<Is...>{};
} else
if constexpr (is_constant<0, decltype(get<NextI>(stride))>::value) {
return detail::nullspace_seq<NextI+1>(stride, seq<Is..., NextI>{});
} else {
return detail::nullspace_seq<NextI+1>(stride, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
//
// Build the nullspace of a layout
// @result A layout @a result such that
// size(@a result) == size(@a layout) / size(filter(@a layout))
// @a layout(@a result(i)) == 0 for all i < size(@a result)
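// Example:
//   nullspace(Layout<Shape<_2,_3,_4>, Stride<_0,_2,_0>>{})  -> (_2,_4):(_1,_6)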
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
nullspace(Layout<Shape,Stride> const& layout)
{
auto flat_layout = flatten(layout);
auto iseq = detail::nullspace_seq<0>(flat_layout.stride(), seq<>{});
if constexpr (iseq.size() == 0) {
return Layout<_1,_0>{}; // Empty case, nothing found
} else {
// Generate the corresponding new strides and construct
auto rstride = compact_col_major(flat_layout.shape());
return make_layout(unwrap(transform(iseq, [&](auto i) { return shape<i>(flat_layout); })),
unwrap(transform(iseq, [&](auto i) { return get<i>(rstride); })));
}
CUTE_GCC_UNREACHABLE;
}
//
// Zip
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
zip(Layout<Shape,Stride> const& layout)
{
return make_layout(zip(layout.shape()),
zip(layout.stride()));
}
template <class TShape, class TStride,
class UShape, class UStride>
CUTE_HOST_DEVICE constexpr
auto
zip(Layout<TShape,TStride> const& layoutA,
Layout<UShape,UStride> const& layoutB)
{
return make_layout(zip(layoutA.shape(), layoutB.shape()),
zip(layoutA.stride(), layoutB.stride()));
}
//
// Tile unzip
// Logical product and logical divide (on layouts) produce rank-2 results by design.
// Follow the profile of @a tile and zip the rank-2 modes located at the terminals into
// their own mode.
//
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tile_unzip(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
return make_layout(zip2_by(layout.shape(), tiler),
zip2_by(layout.stride(), tiler));
}
//
// Logical divide
//
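// Example:
//   logical_divide(Layout<_8,_1>{}, Layout<_2,_1>{})  -> (_2,_4):(_1,_2)   (2-element contiguous tiles, 4 tiles)
//   logical_divide(Layout<_8,_1>{}, Layout<_2,_4>{})  -> (_2,_4):(_4,_1)   (the tile gathers elements {0,4})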
template <class LShape, class LStride,
class TShape, class TStride>
CUTE_HOST_DEVICE constexpr
auto
logical_divide(Layout<LShape,LStride> const& layout,
Layout<TShape,TStride> const& tiler)
{
return composition(layout, make_layout(tiler, complement(tiler, shape(layout))));
}
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
logical_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
if constexpr (is_tuple<Tiler>::value) {
static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank, "logical_divide: Too many modes in tiler.");
return transform_layout(layout, tiler, [](auto const& l, auto const& t) { return logical_divide(l,t); });
} else if constexpr (is_underscore<Tiler>::value) {
return layout;
} else if constexpr (is_integral<Tiler>::value) {
return logical_divide(layout, make_layout(tiler));
}
CUTE_GCC_UNREACHABLE;
}
// Generalization of ceil_div to a Layout divisor (tiler);
//   effectively the "rest mode" of logical_divide.
// Occurs in the calculation of gridDim, for example, for generalized tilers
// Example:
// dim3 gridDim(size(ceil_div(problem_shape_M, cta_tiler_M)),
// size(ceil_div(problem_shape_N, cta_tiler_N)));
// This does not consider compositional acceptance, so it may be the case that
// ceil_div produces a result while logical_divide (and friends) do not.
template <class Target, class TShape, class TStride>
CUTE_HOST_DEVICE constexpr
auto
ceil_div(Target const& target,
Layout<TShape,TStride> const& tiler)
{
return complement(tiler, size(target));
}
//
// Convenience operator
// that produces layouts like ((BLK_A,BLK_B,...),(a,b,...,x,y))
// by gathering the tile modes and residuals into a rank-2 result.
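// Example:
//   zipped_divide(Layout<Shape<_8,_6>, Stride<_1,_8>>{}, Shape<_2,_3>{})
//     -> ((_2,_3),(_4,_2)):((_1,_8),(_2,_24))   i.e. ((TileM,TileN),(RestM,RestN))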
//
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
zipped_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
return tile_unzip(logical_divide(layout, tiler), tiler);
}
// Same as zipped_divide, but unpacks the second mode: ((BLK_A,BLK_B,...),a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tiled_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
auto result = zipped_divide(layout, tiler);
auto R1 = rank<1>(result);
return result(_, repeat<R1>(_));
}
// Same as zipped_divide, but unpacks both modes: (BLK_A,BLK_B,...,a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
flat_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
auto result = zipped_divide(layout, tiler);
auto R0 = rank<0>(result);
auto R1 = rank<1>(result);
return result(repeat<R0>(_), repeat<R1>(_));
}
//
// Logical product
//
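// Example:
//   logical_product(Layout<_2,_1>{}, Layout<_4,_1>{})  -> (_2,_4):(_1,_2)   (4 consecutive copies of the block)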
template <class LShape, class LStride,
class TShape, class TStride>
CUTE_HOST_DEVICE constexpr
auto
logical_product(Layout<LShape,LStride> const& block,
Layout<TShape,TStride> const& tiler)
{
return make_layout(block, composition(complement(block, size(block)*cosize(tiler)), tiler));
}
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
logical_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
if constexpr (is_tuple<Tiler>::value) {
static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank, "logical_product: Too many modes in tiler.");
return transform_layout(block, tiler, [](auto const& l, auto const& t) { return logical_product(l,t); });
} else if constexpr (is_underscore<Tiler>::value) {
return block;
} else if constexpr (is_integral<Tiler>::value) {
return logical_product(block, make_layout(tiler));
}
CUTE_GCC_UNREACHABLE;
}
//
// Convenience operator
// that produces layouts like ((BLK_A,BLK_B,...),(a,b,...,x,y))
// by gathering the block modes and products into a rank-2 result.
//
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
zipped_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
return tile_unzip(logical_product(block, tiler), tiler);
}
// Same as zipped_product, but unpacks the second mode: ((BLK_A,BLK_B,...),a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tiled_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
auto result = zipped_product(block, tiler);
auto R1 = rank<1>(result);
return result(_, repeat<R1>(_));
}
// Same as zipped_product, but unpacks both modes: (BLK_A,BLK_B,...,a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
flat_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
auto result = zipped_product(block, tiler);
auto R0 = rank<0>(result);
auto R1 = rank<1>(result);
return result(repeat<R0>(_), repeat<R1>(_));
}
//
// Rank-sensitive products
//
// blocked_product -- Reproduce a block over a tiler.
// Think of every element of "tiler" as a "block"
// and return the layout of the resulting structure.
// @post rank(@a result) == cute::max(rank(@a block), rank(@a tiler))
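// Example:
//   blocked_product(Layout<Shape<_2,_2>, Stride<_1,_2>>{},
//                   Layout<Shape<_3,_4>, Stride<_1,_3>>{})
//     -> ((_2,_3),(_2,_4)):((_1,_4),(_2,_12))   (a 6x8 layout of 2x2 blocks)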
template <class TShape, class TStride,
class UShape, class UStride>
CUTE_HOST_DEVICE constexpr
auto
blocked_product(Layout<TShape,TStride> const& block,
Layout<UShape,UStride> const& tiler)
{
constexpr int R = cute::max(rank_v<TShape>, rank_v<UShape>);
auto result = logical_product(append<R>(block), append<R>(tiler));
return coalesce(zip(get<0>(result), get<1>(result)), tuple_repeat<R>(Int<1>{}));
}
// raked_product -- Reproduce a block over a tiler with block-interleaving.
// Think of every element of "tiler" as a "block", interleave those blocks,
// and return the layout of the resulting structure.
// @post rank(@a result) == cute::max(rank(@a block), rank(@a tiler))
template <class TShape, class TStride,
class UShape, class UStride>
CUTE_HOST_DEVICE constexpr
auto
raked_product(Layout<TShape,TStride> const& block,
Layout<UShape,UStride> const& tiler)
{
constexpr int R = cute::max(rank_v<TShape>, rank_v<UShape>);
auto result = logical_product(append<R>(block), append<R>(tiler));
return coalesce(zip(get<1>(result), get<0>(result)), tuple_repeat<R>(Int<1>{}));
}
// tile_to_shape -- Perform a product of a layout so that the result matches a target shape.
// This is similar to blocked_product, but specifies the result shape instead of the
// product shape, which is more convenient in certain circumstances.
// @param block The layout to repeat
// @param trg_shape The target shape of the result
// @param ord_shape The order of the modes of @a trg_shape to tile @a layout with.
// Defaults to GenColMajor, so @a layout will repeat
// across the first mode first, the second mode second, etc
// E.g. Step<_2,_1,_3> will cause @a layout to repeat
// across the second mode first, the first mode second, and the third mode last.
// @pre rank(@a block) <= rank(@a trg_shape)
// @post compatible(@a trg_shape, shape(@a result))
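// Example (illustrative):
//   tile_to_shape(Layout<Shape<_2,_2>, Stride<_1,_2>>{}, Shape<_4,_6>{})
//     -> ((_2,_2),(_2,_3)):((_1,_4),(_2,_8))   (the 2x2 block repeated 2x3 times to cover a 4x6 shape)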
template <class Shape, class Stride,
class TrgShape, class ModeOrder = LayoutLeft>
CUTE_HOST_DEVICE constexpr
auto
tile_to_shape(Layout<Shape,Stride> const& block,
TrgShape const& trg_shape,
ModeOrder const& ord_shape = {})
{
CUTE_STATIC_ASSERT_V(rank(block) <= rank(trg_shape), "Rank of layout must be <= rank of target shape.");
constexpr int R = rank_v<TrgShape>;
auto padded_block = append<R>(block);
auto block_shape = product_each(shape(padded_block));
auto target_shape = product_each(shape(trg_shape));
// Assert proper division
if constexpr (is_static<decltype(target_shape)>::value) {
CUTE_STATIC_ASSERT_V(weakly_compatible(block_shape, target_shape),
"tile_to_shape: block shape does not divide the target shape.");
}
auto product_shape = ceil_div(target_shape, block_shape);
return coalesce(blocked_product(padded_block, make_ordered_layout(product_shape, ord_shape)), product_shape);
}
//
// Upcast
// For stride-1 mode, divide size by N. Divide all other strides by N.
//
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) { // tuple stride
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return upcast<N>(s,d); });
} else if constexpr (is_constant<0, Stride>::value) { // static-0 stride
return Layout<Shape,Stride>{shape,stride};
} else if constexpr (is_static<Stride>::value) { // static stride
return make_layout(shape_div(shape, shape_div(Int<N>{}, abs(stride))),
shape_div(stride, Int<N>{}));
} else { // dynamic stride
// assume dynamic strides are larger than N and divisible
// assert(stride % N == 0);
return make_layout(shape, safe_div(stride, Int<N>{}));
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Layout<Shape,Stride> const& layout)
{
return upcast<N>(layout.shape(), layout.stride());
}
//
// Downcast
// For stride-1 mode, multiply size by N. Multiply all other strides by N.
//
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
downcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) {
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return downcast<N>(s,d); });
} else if constexpr (is_constant<1, Stride>::value || is_constant<-1, Stride>::value) {
return make_layout(shape * Int<N>{}, stride);
} else {
return make_layout(shape, stride * Int<N>{});
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
downcast(Layout<Shape,Stride> const& layout)
{
CUTE_STATIC_ASSERT(has_int1<Stride>::value, "Downcast requires adjacent elements");
return downcast<N>(layout.shape(), layout.stride());
}
//
// Recast
//
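// Example:
//   recast_layout<uint16_t, uint32_t>((_4,_8):(_1,_4))  -> (_2,_8):(_1,_2)   // upcast<2>
//   recast_layout<uint32_t, uint16_t>((_2,_8):(_1,_2))  -> (_4,_8):(_1,_4)   // downcast<2>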
template <class OldType, class NewType,
class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
recast_layout(Layout<Shape,Stride> const& layout)
{
using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{}));
if constexpr (scale::num == 1 && scale::den == 1) {
return layout;
}
else if constexpr (scale::num == 1) {
return downcast<scale::den>(layout);
}
else if constexpr (scale::den == 1) {
return upcast<scale::num>(layout);
}
else {
static_assert(dependent_false<scale>, "Recast not supported.");
}
CUTE_GCC_UNREACHABLE;
}
//
// Display utilities
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE void print(Layout<Shape,Stride> const& layout)
{
print(layout.shape()); print(":"); print(layout.stride());
}
#if !defined(__CUDACC_RTC__)
template <class Shape, class Stride>
CUTE_HOST std::ostream& operator<<(std::ostream& os, Layout<Shape,Stride> const& layout)
{
return os << shape(layout) << ":" << stride(layout);
}
#endif
// Generic 2D Layout to console table
template <class Layout>
CUTE_HOST_DEVICE
void
print_layout(Layout const& layout) // (m,n) -> idx
{
CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{});
int idx_width = num_digits(cosize(layout)) + 2;
const char* delim = "+-----------------------";
print(layout); print("\n");
// Column indices
print(" ");
for (int n = 0; n < size<1>(layout); ++n) { printf(" %*d ", idx_width-2, n); }
printf("\n");
// Print out A m-by-n
for (int m = 0; m < size<0>(layout); ++m) {
// Header
print(" ");
for (int n = 0; n < size<1>(layout); ++n) { printf("%.*s", idx_width+1, delim); }
printf("+\n");
// Values
printf("%2d ", m); // Row indices
for (int n = 0; n < size<1>(layout); ++n) { printf("| %*d ", idx_width-2, int(layout(m,n))); }
printf("|\n");
}
// Footer
print(" ");
for (int n = 0; n < size<1>(layout); ++n) { printf("%.*s", idx_width+1, delim); }
printf("+\n");
}
// Generic ThrVal 2D Layout to console table
template <class Layout, class ThrID>
CUTE_HOST_DEVICE
void
print_layout(Layout const& layout, ThrID const& thrid) // (m,n) -> (tid,vid) and tid -> thr_idx
{
CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{});
print(layout); print("\n");
print(thrid); print("\n");
// Print out m-by-n
for (int m = 0; m < size<0>(layout); ++m) {
// Header
for (int n = 0; n < size<1>(layout); ++n) printf("+------");
printf("+\n");
// Values
for (int n = 0; n < size<1>(layout); ++n) printf("|%03d-%02d", int(thrid(layout(m,n) % size(thrid))), int(layout(m,n) / size(thrid)));
printf("|\n");
}
// Footer
for (int n = 0; n < size<1>(layout); ++n) printf("+------");
printf("+\n");
}
// Generic 2D Layout to Latex printer -- B&W 8-value color coding
template <class LayoutA>
CUTE_HOST_DEVICE
void
print_latex(LayoutA const& layout_a)
{
CUTE_STATIC_ASSERT_V(rank(layout_a) <= Int<2>{});
auto layout = append<2>(layout_a, Layout<_1,_0>{});
char const* latex_header =
"\\documentclass[convert]{standalone}\n"
"\\usepackage{tikz}\n\n"
"\\begin{document}\n"
"\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center,font=\\Large}]\n\n";
char const* latex_footer =
"\\end{tikzpicture}\n"
"\\end{document}\n";
char const* color_map[8] = {"black!00",
"black!40",
"black!20",
"black!60",
"black!10",
"black!50",
"black!30",
"black!70"};
// Header
printf("%% Layout: "); print(layout); printf("\n");
printf(latex_header);
// Layout
for (int i = 0; i < size<0>(layout); ++i) {
for (int j = 0; j < size<1>(layout); ++j) {
int idx = layout(i,j);
printf("\\node[box,fill=%s] at (%d,%d) {%d};\n",
color_map[idx % 8],
i, j,
idx);
}
}
// Labels
for (int i = 0, j = -1; i < size<0>(layout); ++i) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i);
}
for (int j = 0, i = -1; j < size<1>(layout); ++j) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j);
}
// Footer
printf(latex_footer);
}
// Generic ThrVal 2D Layout to Latex TIKZ -- 8-value color coded by thread
template <class Layout, class ThrID>
CUTE_HOST_DEVICE
void
print_latex(Layout const& layout, ThrID const& thr) // (m,n) -> (tid,vid) and tid -> thr_idx
{
CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{});
char const* latex_header =
"\\documentclass[convert]{standalone}\n"
"\\usepackage{tikz}\n\n"
"\\begin{document}\n"
"\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center}]\n\n";
char const* latex_footer =
"\\end{tikzpicture}\n"
"\\end{document}\n";
char const* color_map[8] = {"{rgb,255:red,175;green,175;blue,255}",
"{rgb,255:red,175;green,255;blue,175}",
"{rgb,255:red,255;green,255;blue,175}",
"{rgb,255:red,255;green,175;blue,175}",
"{rgb,255:red,210;green,210;blue,255}",
"{rgb,255:red,210;green,255;blue,210}",
"{rgb,255:red,255;green,255;blue,210}",
"{rgb,255:red,255;green,210;blue,210}"};
// Header
printf("%% layout: "); print(layout); printf("\n");
printf("%% thrid: "); print(thr); printf("\n\n");
printf(latex_header);
// Layout
for (int i = 0; i < size<0>(layout); ++i) {
for (int j = 0; j < size<1>(layout); ++j) {
int thrid = layout(i,j) % size(thr);
int val_idx = layout(i,j) / size(thr);
int thr_idx = thr(thrid);
printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
color_map[thr_idx % 8],
i, j,
thr_idx, val_idx);
}
}
// Labels
for (int i = 0, j = -1; i < size<0>(layout); ++i) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i);
}
for (int j = 0, i = -1; j < size<1>(layout); ++j) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j);
}
// Footer
printf(latex_footer);
}
} // end namespace cute
//
// Extended Layouts
//
#include <cute/swizzle_layout.hpp>
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/numeric/integer_sequence.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/math.hpp>
namespace cute
{
// A generic Swizzle functor
/* 0bxxxxxxxxxxxxxxxYYYxxxxxxxZZZxxxx
* ^--^ MBase is the number of least-sig bits to keep constant
* ^-^ ^-^ BBits is the number of bits in the mask
* ^---------^ SShift is the distance to shift the YYY mask
* (pos shifts YYY to the right, neg shifts YYY to the left)
*
* e.g. Given
* 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxZZxxx
* the result is
* 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxAAxxx where AA = ZZ xor YY
*/
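/* Example:
 *   Swizzle<2,3,3> uses masks YYY = 0b11000000 and ZZZ = 0b00011000, so
 *   Swizzle<2,3,3>{}(0b01001000) == 0b01000000   (the ZZ bits 01 are XORed with the YY bits 01)
 */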
template <int BBits, int MBase, int SShift = BBits>
struct Swizzle
{
static constexpr int num_bits = BBits;
static constexpr int num_base = MBase;
static constexpr int num_shft = SShift;
static_assert(num_base >= 0,             "MBase must be non-negative.");
static_assert(num_bits >= 0,             "BBits must be non-negative.");
static_assert(abs(num_shft) >= num_bits, "abs(SShift) must be at least BBits.");
// Using 'int' here to avoid unintentionally casting to unsigned; the best choice is unclear.
using bit_msk = cute::constant<int, (1 << num_bits) - 1>;
using yyy_msk = cute::constant<int, bit_msk{} << (num_base + max(0,num_shft))>;
using zzz_msk = cute::constant<int, bit_msk{} << (num_base - min(0,num_shft))>;
using msk_sft = cute::constant<int, num_shft>;
static constexpr uint32_t swizzle_code = uint32_t(yyy_msk{} | zzz_msk{});
template <class Offset>
CUTE_HOST_DEVICE constexpr static
auto
apply(Offset const& offset)
{
return offset ^ shiftr(offset & yyy_msk{}, msk_sft{}); // ZZZ ^= YYY
}
template <class Offset>
CUTE_HOST_DEVICE constexpr
auto
operator()(Offset const& offset) const
{
return apply(offset);
}
template <int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
operator==(Swizzle<B,M,S> const&) const
{
return B == BBits && M == MBase && S == SShift;
}
};
//
// make_swizzle<0b1000, 0b0100>() -> Swizzle<1,2,1>
// make_swizzle<0b11000000, 0b00000110>() -> Swizzle<2,1,5>
//
template <uint32_t Y, uint32_t Z>
CUTE_HOST_DEVICE constexpr
auto
make_swizzle()
{
constexpr uint32_t BZ = popcount(Y); // Number of bits set in the Y mask
constexpr uint32_t BY = popcount(Z); // Number of bits set in the Z mask
static_assert(BZ == BY, "Number of bits in Y and Z don't match");
constexpr uint32_t TZ_Y = countr_zero(Y); // Number of trailing zeros in Y
constexpr uint32_t TZ_Z = countr_zero(Z); // Number of trailing zeros in Z
constexpr uint32_t M = cute::min(TZ_Y, TZ_Z) % 32;
constexpr int32_t S = int32_t(TZ_Y) - int32_t(TZ_Z); // Difference in trailing zeros
static_assert((Y | Z) == Swizzle<BZ,M,S>::swizzle_code, "Y and Z masks are inconsistent with the deduced Swizzle.");
return Swizzle<BZ,M,S>{};
}
template <int B0, int M0, int S0,
int B1, int M1, int S1>
CUTE_HOST_DEVICE constexpr
auto
composition(Swizzle<B0,M0,S0>, Swizzle<B1,M1,S1>)
{
static_assert(S0 == S1, "Can only merge swizzles of the same shift.");
constexpr uint32_t Y = Swizzle<B0,M0,S0>::yyy_msk::value ^ Swizzle<B1,M1,S1>::yyy_msk::value;
constexpr uint32_t Z = Swizzle<B0,M0,S0>::zzz_msk::value ^ Swizzle<B1,M1,S1>::zzz_msk::value;
return make_swizzle<Y,Z>();
//return ComposedFn<Swizzle<B0,M0,S0>, Swizzle<B1,M1,S1>>{};
}
//
// Utility for slicing and swizzle "offsets"
//
// For swizzle functions, it is often needed to keep track of which bits are
// consumed and which bits are free. Furthermore, it is useful to know whether
// each of these bits is known statically or dynamically.
// MixedBits is a 32-bit unsigned integer class where some bits are known statically
// and some bits are known dynamically. These sets of bits are disjoint and it is
// known statically which bits are known dynamically.
// MixedBits can only be manipulated through bitwise operations.
// Abstract value: StaticInt | (dynamic_int_ & StaticFlags)
template <uint32_t StaticInt,
uint32_t StaticFlags> // 0: static, 1: dynamic
struct MixedBits
{
// Representation invariants
static_assert(StaticFlags != 0, "Should be at least one dynamic bit in MixedBits.");
static_assert((StaticInt & StaticFlags) == 0, "No static/dynamic overlap allowed in MixedBits.");
uint32_t dynamic_int_;
// assert((dynamic_int_ & ~StaticFlags) == 0);
CUTE_HOST_DEVICE constexpr operator uint32_t() const noexcept { return StaticInt | dynamic_int_; }
};
// Return a value representing (C<s>{} | (d & C<f>)) potentially using MixedBits to track s and f.
// This maker does allow ((s & f) != 0) and enforces the MixedBits invariant before creation.
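// Example:
//   uint32_t d = ...;                                        // runtime value
//   auto m = make_mixed_bits(C<0b0100>{}, d, C<0b0011>{});   // MixedBits<0b0100,0b0011>; uint32_t(m) == 0b0100 | (d & 0b0011)
//   make_mixed_bits(C<0b0100>{}, C<0b0001>{}, C<0b0011>{})   -> C<0b0101>{}   (all-static input folds to a static int)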
template <auto s, class DynamicType, auto f>
CUTE_HOST_DEVICE constexpr
auto
make_mixed_bits(C<s>, DynamicType const& d, C<f>)
{
static_assert(is_integral<DynamicType>::value);
constexpr uint32_t new_f = uint32_t(f) & ~uint32_t(s); // StaticBits take precedence, M<0,f>{d} | C<s>{}
if constexpr (new_f == 0 || is_static<DynamicType>::value) {
return C<s>{} | (d & C<new_f>{}); // Just return a static int
} else {
return MixedBits<s, new_f>{uint32_t(d) & new_f}; // MixedBits
}
CUTE_GCC_UNREACHABLE;
}
//
// Operators
//
// Equality
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator==(MixedBits<S0,F0> const& m, C<S1>)
{
return (S0 == (uint32_t(S1) & ~F0)) && (m.dynamic_int_ == (uint32_t(S1) & F0));
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator==(C<S1> s, MixedBits<S0,F0> const& m)
{
return m == s;
}
// Bitwise AND
template <uint32_t S0, uint32_t F0,
uint32_t S1, uint32_t F1>
CUTE_HOST_DEVICE constexpr
auto
operator&(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1)
{
// Truth table for (S0,D0,F0) & (S1,D1,F1) -> (S,D,F)
// S0D0F0 | 0X0 | 001 | 011 | 1X0 |
// S1D1F1
// 0X0 | 0X0 | 0X0 | 0X0 | 0X0 |
// 001 | 0X0 | 001 | 001 | 001 |
// 011 | 0X0 | 001 | 011 | 011 |
// 1X0 | 0X0 | 001 | 011 | 1X0 |
return make_mixed_bits(C<S0 & S1>{},
//(S0 | m0.dynamic_int_) & (S1 | m1.dynamic_int_),
((S1 & F0) & m0.dynamic_int_) | ((S0 & F1) & m1.dynamic_int_) | (m0.dynamic_int_ & m1.dynamic_int_),
C<(S1 & F0) | (S0 & F1) | (F0 & F1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator&(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<S0 & uint32_t(S1)>{},
m.dynamic_int_,
C<F0 & uint32_t(S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator&(C<S1> s, MixedBits<S0,F0> const& m)
{
return m & s;
}
// Bitwise OR
template <uint32_t S0, uint32_t F0,
uint32_t S1, uint32_t F1>
CUTE_HOST_DEVICE constexpr
auto
operator|(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1)
{
// Truth table for (S0,D0,F0) | (S1,D1,F1) -> (S,D,F)
// S0D0F0 | 0X0 | 001 | 011 | 1X0 |
// S1D1F1
// 0X0 | 0X0 | 001 | 011 | 1X0 |
// 001 | 001 | 001 | 011 | 1X0 |
// 011 | 011 | 011 | 011 | 1X0 |
// 1X0 | 1X0 | 1X0 | 1X0 | 1X0 |
return make_mixed_bits(C<S0 | S1>{},
((~S1 & F0) & m0.dynamic_int_) | ((~S0 & F1) & m1.dynamic_int_),
C<(~S0 & F1) | (~S1 & F0)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator|(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<S0 | uint32_t(S1)>{},
m.dynamic_int_,
C<F0 & ~uint32_t(S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator|(C<S1> s, MixedBits<S0,F0> const& m)
{
return m | s;
}
// Bitwise XOR
template <uint32_t S0, uint32_t F0,
uint32_t S1, uint32_t F1>
CUTE_HOST_DEVICE constexpr
auto
operator^(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1)
{
// Truth table for (S0,D0,F0) ^ (S1,D1,F1) -> (S,D,F)
// S0D0F0 | 0X0 | 001 | 011 | 1X0 |
// S1D1F1
// 0X0 | 0X0 | 001 | 011 | 1X0 |
// 001 | 001 | 001 | 011 | 011 |
// 011 | 011 | 011 | 001 | 001 |
// 1X0 | 1X0 | 011 | 001 | 0X0 |
return make_mixed_bits(C<(~S0 & S1 & ~F0) | (S0 & ~S1 & ~F1)>{},
(S0 | m0.dynamic_int_) ^ (S1 | m1.dynamic_int_),
C<F0 | F1>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator^(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<(~S0 & uint32_t(S1) & ~F0) | (S0 & ~uint32_t(S1))>{},
(S0 | m.dynamic_int_) ^ uint32_t(S1),
C<F0>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator^(C<S1> s, MixedBits<S0,F0> const& m)
{
return m ^ s;
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator<<(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<(S0 << S1)>{},
m.dynamic_int_ << S1,
C<(F0 << S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator>>(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<(S0 >> S1)>{},
m.dynamic_int_ >> S1,
C<(F0 >> S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
shiftl(MixedBits<S0,F0> const& m, C<S1> s)
{
if constexpr (S1 >= 0) {
return m << s;
} else {
return m >> -s;
}
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
shiftr(MixedBits<S0,F0> const& m, C<S1> s)
{
if constexpr (S1 >= 0) {
return m >> s;
} else {
return m << -s;
}
}
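// Note that shiftl/shiftr take a signed static shift amount: a negative amount
// simply flips the direction, e.g. shiftl(m, C<-2>{}) evaluates to m >> 2.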
//
// upcast and downcast
//
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
safe_div(MixedBits<S0,F0> const& m, C<S1> s)
{
static_assert(has_single_bit(uint32_t(S1)), "Only divide MixedBits by powers of two.");
return make_mixed_bits(safe_div(C<S0>{}, s),
safe_div(m.dynamic_int_, s),
safe_div(C<F0>{}, s));
}
template <uint32_t N, uint32_t S0, uint32_t F0>
CUTE_HOST_DEVICE constexpr
auto
upcast(MixedBits<S0,F0> const& m)
{
static_assert(has_single_bit(N), "Only divide MixedBits by powers of two.");
return safe_div(m, C<N>{});
}
template <uint32_t N, class T, __CUTE_REQUIRES(cute::is_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
upcast(T const& m)
{
return safe_div(m, C<N>{});
}
template <uint32_t N, uint32_t S0, uint32_t F0>
CUTE_HOST_DEVICE constexpr
auto
downcast(MixedBits<S0,F0> const& m)
{
static_assert(has_single_bit(N), "Only scale MixedBits by powers of two.");
return make_mixed_bits(C<S0 * N>{},
m.dynamic_int_ * N,
C<F0 * N>{});
}
template <uint32_t N, class T, __CUTE_REQUIRES(cute::is_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
downcast(T const& m)
{
return m * C<N>{};
}
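// Quick numeric illustration (values chosen arbitrarily): for a plain integral
// index, upcast<8>(24) evaluates to 24 / 8 = 3, and downcast<8>(3) evaluates to
// 3 * 8 = 24. For MixedBits, the same power-of-two scaling is applied to the
// static bits, the dynamic bits, and the flag mask independently, as defined above.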
//
// Convert a Pow2Layout+Coord to a MixedBits
//
template <class Shape, class Stride, class Coord>
CUTE_HOST_DEVICE constexpr
auto
to_mixed_bits(Shape const& shape, Stride const& stride, Coord const& coord)
{
if constexpr (is_tuple<Shape>::value && is_tuple<Stride>::value && is_tuple<Coord>::value) {
static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched ranks");
static_assert(tuple_size<Shape>::value == tuple_size<Coord >::value, "Mismatched ranks");
return transform_apply(shape, stride, coord, [](auto const& s, auto const& d, auto const& c) { return to_mixed_bits(s,d,c); },
[](auto const&... a) { return (a ^ ...); });
} else if constexpr (is_integral<Shape>::value && is_integral<Stride>::value && is_integral<Coord>::value) {
static_assert(decltype(shape*stride)::value == 0 || has_single_bit(decltype(shape*stride)::value), "Requires pow2 shape*stride.");
return make_mixed_bits(Int<0>{}, coord * stride, (shape - Int<1>{}) * stride);
} else {
static_assert(is_integral<Shape>::value && is_integral<Stride>::value && is_integral<Coord>::value, "Either Shape, Stride, and Coord must be all tuples, or they must be all integral (in the sense of cute::is_integral).");
}
CUTE_GCC_UNREACHABLE;
}
template <class Layout, class Coord>
CUTE_HOST_DEVICE constexpr
auto
to_mixed_bits(Layout const& layout, Coord const& coord)
{
return to_mixed_bits(layout.shape(), layout.stride(), idx2crd(coord, layout.shape()));
}
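// Example of the integral leaf case above (illustrative values): for a mode with
// shape 4 and stride 2, a coordinate c maps to
//   make_mixed_bits(Int<0>{}, c * 2, (4 - 1) * 2)
// i.e. no static bits, dynamic bits c*2, and a flag mask of 0b110 covering every
// bit the coordinate can touch. Tuple-shaped modes are converted per-mode and the
// results are combined with XOR, as written in the transform_apply above.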
//
// Display utilities
//
template <int B, int M, int S>
CUTE_HOST_DEVICE void print(Swizzle<B,M,S> const&)
{
printf("Sw<%d,%d,%d>", B, M, S);
}
template <uint32_t S, uint32_t F>
CUTE_HOST_DEVICE void print(MixedBits<S,F> const& m)
{
printf("M_%u|(%u&%u)=%u", S, m.dynamic_int_, F, uint32_t(m));
}
#if !defined(__CUDACC_RTC__)
template <int B, int M, int S>
CUTE_HOST std::ostream& operator<<(std::ostream& os, Swizzle<B,M,S> const&)
{
return os << "Sw<" << B << "," << M << "," << S << ">";
}
template <uint32_t S, uint32_t F>
CUTE_HOST std::ostream& operator<<(std::ostream& os, MixedBits<S,F> const& m)
{
return os << "M_" << S << "|(" << m.dynamic_int_ << "&" << F << ")=" << uint32_t(m);
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace cute
| include/cute/swizzle.hpp/0 | {
"file_path": "include/cute/swizzle.hpp",
"repo_id": "include",
"token_count": 6999
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include "cutlass/arch/mma.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, float, LayoutA, float, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<float, 1> const &a,
Array<float, 1> const &b,
Array<float, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
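// Minimal usage sketch for the specialization above (illustrative only; the
// layout arguments are placeholders, since this scalar specialization accepts any
// LayoutA/LayoutB/LayoutC):
//
//   cutlass::arch::Mma<
//     cutlass::gemm::GemmShape<1, 1, 1>, 1,
//     float, cutlass::layout::RowMajor,
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::RowMajor,
//     cutlass::arch::OpMultiplyAdd> mma;
//
//   cutlass::Array<float, 1> d, a, b, c;
//   a[0] = 2.0f; b[0] = 3.0f; c[0] = 1.0f;
//   mma(d, a, b, c);   // d[0] == 2.0f * 3.0f + 1.0f == 7.0f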
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, double, LayoutA, double, LayoutB, double, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = double;
CUTLASS_HOST_DEVICE
void operator()(
Array<double, 1> &d,
Array<double, 1> const &a,
Array<double, 1> const &b,
Array<double, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, int, LayoutA, int, LayoutB, int, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int, 1> const &a,
Array<int, 1> const &b,
Array<int, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
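// The four statements above compute the ordinary complex multiply-add
//   d = a * b + c
//     = (a.real * b.real - a.imag * b.imag + c.real)
//       + i * (a.imag * b.real + a.real * b.imag + c.imag)
// as a chain of real multiply-adds, so each line can map onto a single scalar FMA.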
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
float,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<float, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
float,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<float, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
double,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<double, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
double,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<double, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, half_t, LayoutA, half_t, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<half_t, 1> const &a,
Array<half_t, 1> const &b,
Array<float, 1> const &c
) {
d[0] = float(a[0]) * float(b[0]) + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation for Quaternions
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, Quaternion<float>, LayoutA, Quaternion<float>, LayoutB, Quaternion<float>, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using Element = Quaternion<float>;
using ElementC = Element;
CUTLASS_HOST_DEVICE
void operator()(
Array<Element, 1> &d,
Array<Element, 1> const &a,
Array<Element, 1> const &b,
Array<Element, 1> const &c
) {
multiply_add<Element, Element, Element> op;
d[0] = op(a[0], b[0], c[0]);
}
};
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma_sm50.h/0 | {
"file_path": "include/cutlass/arch/mma_sm50.h",
"repo_id": "include",
"token_count": 3887
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for int8_t
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
int8_t, ///< ElementA
LayoutA_, ///< LayoutA
int8_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
using Shape = Shape_;
using ElementA = int8_t;
using LayoutA = LayoutA_;
using ElementB = int8_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm72;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for s8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond");
#endif
};
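// Illustrative device-side sketch of how this specialization is invoked (assumes
// CUTLASS_ARCH_WMMA_SM72_ENABLED; fragment contents are expected to be produced
// elsewhere, e.g. via nvcuda::wmma::load_matrix_sync, and all names below are
// placeholders):
//
//   using WmmaS8 = cutlass::arch::Wmma<
//     cutlass::gemm::GemmShape<16, 16, 16>,
//     int8_t, cutlass::layout::RowMajor,
//     int8_t, cutlass::layout::ColumnMajor,
//     int32_t, cutlass::layout::RowMajor,
//     cutlass::arch::OpMultiplyAdd>;
//
//   WmmaS8::FragmentA frag_A;   // loaded from shared or global memory
//   WmmaS8::FragmentB frag_B;
//   WmmaS8::FragmentC accum;    // initialized, e.g. filled with zeros
//   WmmaS8 wmma;
//   wmma(accum, frag_A, frag_B, accum);   // accum = A * B + accum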
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for uint8_t
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
uint8_t, ///< ElementA
LayoutA_, ///< LayoutA
uint8_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
using Shape = Shape_;
using ElementA = uint8_t;
using LayoutA = LayoutA_;
using ElementB = uint8_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm72;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for u8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/wmma_sm72.h/0 | {
"file_path": "include/cutlass/arch/wmma_sm72.h",
"repo_id": "include",
"token_count": 3101
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level fused activation's scale+bias+relu and implicit GEMM convolution
definitions that combine threadblock-scoped matrix multiply-add with the
appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h"
#include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h"
#include "cutlass/gemm/warp/scale_bias_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for fused batch norm and Conv2dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementScaleBias,
typename LayoutScaleBias,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kUnity
> struct DefaultConv2dFpropFusion;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementScaleBias,
typename LayoutScaleBias,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultConv2dFpropFusion <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementScaleBias,
LayoutScaleBias,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using IteratorScaleBias =
cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
using SmemIteratorScaleBias =
cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static int const kThreadCount = 32;
// Warp-level iterators to load scale and bias vectors
using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator<
MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias,
LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>,
typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount,
MmaCore::WarpCount::kK>;
// Define the Mma
using Mma = threadblock::ImplicitGemmFpropFusionMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
IteratorScaleBias,
SmemIteratorScaleBias,
arch::CacheOperation::Always,
MmaPolicy,
WarpIteratorScaleBias,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementScaleBias,
typename LayoutScaleBias,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultConv2dFpropFusion <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementScaleBias,
LayoutScaleBias,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag
>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
/// Define iterators over tiles from scale/bias vectors
using IteratorScaleBias =
cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
using SmemIteratorScaleBias =
cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator<
cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias,
LayoutScaleBias>;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static int const kThreadCount = 32;
// Warp-level iterators to load scale and bias vectors
using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator<
MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias,
LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>,
typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount,
MmaCore::WarpCount::kK>;
// Define the Mma
using Mma = threadblock::ImplicitGemmFpropFusionMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
IteratorScaleBias,
SmemIteratorScaleBias,
arch::CacheOperation::Always,
MmaPolicy,
WarpIteratorScaleBias,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h",
"repo_id": "include",
"token_count": 3829
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level Depthwise implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/kernel/direct_convolution.h"
#include "cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/depthwise_fprop_pipelined.h"
// Direct Conv Related Header files
#include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h"
#include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h"
#include "cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h"
#include "cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for DepthwiseFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = cutlass::sizeof_bits<ElementB>::value / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDepthwiseFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for DepthwiseFprop with direct convolution algorithm
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
// MatrixShape<Height, Width>
typename StrideShape = cutlass::MatrixShape<-1, -1>,
// MatrixShape< Height, Width>
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDepthwiseDirect2dConvFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for Analytic IteratorAlgorithm
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag, // cutlass::arch::OpMultiplyAdd
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseMmaCoreWithLaneAccessSize<
ThreadblockShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
sizeof_bits<ElementB>::value,
2,
MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB,
cutlass::conv::GroupMode::kDepthwise
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::DepthwiseFpropPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for direct 2d conv implementation,
/// multiple stage pipeline, and SIMT-based mainloop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
typename StrideShape,
typename DilationShape,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseDirect2dConvFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
StrideShape,
DilationShape,
AlignmentA,
AlignmentB
> {
// One warp handles the entire set of groups per cta.
static_assert(ThreadblockShape::kN == WarpShape::kN,
"ThreadblockShape::kN should be same as WarpShape::kN ");
static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount,
"ThreadblockShape::kK and WarpShape::kK should be same as filter size");
static_assert(ThreadblockShape::kM % WarpShape::kM == 0,
"ThreadblockShape::kM must be divisible by WarpShape shape::kM");
static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1");
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize<
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
128,
Stages,
MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta>
ThreadBlockOutputShape,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
using ThreadOutputShape = typename MmaCore::ThreadOutputShape;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * AlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt<
ThreadblockShape, // < outputShape:KMNK, groups per cta>
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ThreadOutputShape,
ThreadBlockOutputShape
>::Epilogue;
// Define the Mma
using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages,
Epilogue
>;
// Define the kernel
using Kernel = cutlass::conv::kernel::DirectConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise,
ThreadBlockOutputShape
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for direct 2d conv implementation,
/// multiple stage pipeline, and SIMT-based mainloop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
typename StrideShape,
typename DilationShape,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseDirect2dConvFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedStrideDilation,
StrideSupport,
StrideShape,
DilationShape,
AlignmentA,
AlignmentB
> {
// One warp handles the entire set of groups per cta.
static_assert(ThreadblockShape::kN == WarpShape::kN,
"ThreadblockShape::kN should be same as WarpShape::kN ");
static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount,
"ThreadblockShape::kK and WarpShape::kK should be same as filter size");
static_assert(ThreadblockShape::kM % WarpShape::kM == 0,
"ThreadblockShape::kM must be divisible by WarpShape shape::kM");
static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1");
static_assert(StrideShape::kRow >= 0 && StrideShape::kColumn >= 0, "Stride should be fixed");
static_assert(DilationShape::kRow >= 0 && DilationShape::kColumn >= 0, "Dilation should be fixed");
// Activations loaded by threadblock
static int const ActivationShapeH = (ThreadBlockOutputShape::kH - 1) * StrideShape::kRow +
(FilterShape::kRow - 1) * DilationShape::kRow + 1;
static int const ActivationShapeW = (ThreadBlockOutputShape::kW - 1) * StrideShape::kColumn +
(FilterShape::kColumn - 1) * DilationShape::kColumn + 1;
using ActivationShape =
cutlass::conv::TensorNHWCShape<1, ActivationShapeH, ActivationShapeW, ThreadblockShape::kN >;
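// For instance (illustrative numbers only): with a ThreadBlockOutputShape of 8x8,
// a 3x3 FilterShape, unit stride, and unit dilation, each threadblock loads an
// activation patch of (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10 rows by 10 columns,
// which is what ActivationShape encodes together with the channel extent
// ThreadblockShape::kN.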
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize<
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
128,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape,
DilationShape,
ActivationShape>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation<
cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta>
ThreadBlockOutputShape,
StrideShape,
DilationShape,
ActivationShape,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
using ThreadOutputShape = typename MmaCore::ThreadOutputShape;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * AlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt<
ThreadblockShape, // < outputShape:KMNK, groups per cta>
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ThreadOutputShape,
ThreadBlockOutputShape
>::Epilogue;
// Define the Mma
using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages,
Epilogue,
IteratorAlgorithm::kFixedStrideDilation
>;
// Define the kernel
using Kernel = cutlass::conv::kernel::DirectConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise,
ThreadBlockOutputShape
>;
};
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_depthwise_fprop.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_depthwise_fprop.h",
"repo_id": "include",
"token_count": 6502
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
using Mask = uint64_t;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFpropActivationIteratorOptimizedParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (r, s)
int filter_r_;
int filter_s_;
int filter_c_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_c_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
// offset_n[s] = offset_npq / (problem_size_.P * problem_size_.Q);
// int residual = offset_npq % (problem_size_.P * problem_size_.Q);
//
// offset_p[s] = residual / problem_size_.Q;
// offset_q[s] = residual % problem_size_.Q;
//
int residual;
params.pq_divmod(offset_n[s], residual, offset_npq);
params.q_divmod(offset_p[s], offset_q[s], residual);
TensorCoord coord = at_(offset_n[s], offset_p[s], offset_q[s], 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h;
bool pred = (offset_n[s_idx] < problem_size_.N && h >= 0 && h < problem_size_.H);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w;
bool pred = (w >= 0 && w < problem_size_.W);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the activations tensor X that corresponds to
  /// output position npq and filter position (r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int p, int q, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, h, w, filter_c_);
}
  /// Adds a pointer offset in units of bytes
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
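  /// Advances to the next filter position: next_idx selects the precomputed pointer
  /// increment in params_.inc_next (0: next s, 1: next r after wrapping s,
  /// 2: next channel block after wrapping both r and s).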
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 2) {
filter_c_ += params_.filter_c_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? 0 : masks_[s][v][0];
masks_[s][v][1] = clear ? 0 : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? 0 : masks_[s][v][0];
masks_[s][v][1] = clear ? 0 : masks_[s][v][1];
}
}
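  /// Returns true if the current access is in bounds, i.e. the predicate bits for the
  /// current filter position (r, s) are set for the current strided/vector iteration.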
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if ((problem_size.C / problem_size.groups) % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
// Conv2dFpropActivationTileAccessIteratorOptimized has constraint on filter positions
// due to the number of mask bits.
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 5193
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_
>
class Conv3dFpropActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dFpropActivationIteratorOptimizedParams<Layout>;
private:
Conv3dFpropActivationIteratorOptimizedParams<Layout> const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (t, r, s)
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
// mask for t, r, and s
Index masks_[ThreadMap::Iterations::kStrided][3];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorOptimized(
Conv3dFpropActivationIteratorOptimizedParams<Layout> const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
) :
params_(params),
problem_size_(problem_size),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_z[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// offset_z[s] = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// offset_p[s] = residual / problem_size_.Q;
// offset_q[s] = residual % problem_size_.Q;
//
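      // For example (illustrative values only), with Z = 2, P = 4, Q = 4,
      // offset_nzpq = 37 decomposes as offset_n = 37 / 32 = 1, residual = 5,
      // offset_z = 5 / 16 = 0, residual = 5, offset_p = 5 / 4 = 1, offset_q = 5 % 4 = 1.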
int residual;
      // input: (nzpq offset) output: (n offset and residual (zpq offset))
      params.zpq_divmod(offset_n[s], residual, offset_nzpq);
      // input: (zpq offset) output: (z offset and residual (pq offset))
      params.pq_divmod(offset_z[s], residual, residual);
      // input: (pq offset) output: (p offset and residual (q offset))
params.q_divmod(offset_p[s], offset_q[s], residual);
TensorCoord coord = at_(offset_n[s], offset_z[s], offset_p[s], offset_q[s], 0, 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
// mask predicates for filter position T
CUTLASS_PRAGMA_NO_UNROLL
for (int t = 0; t < problem_size_.T; ++t) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int t_ = t;
if (problem_size_.mode == Mode::kConvolution) {
t_ = problem_size_.T - 1 - t;
}
int d = offset_z[s_idx] * problem_size_.stride_d - problem_size_.pad_d + t_ * problem_size_.dilation_d;
bool pred = (offset_n[s_idx] < problem_size_.N && d >= 0 && d < problem_size_.D);
masks_[s_idx][0] |= (pred << t);
}
}
// mask predicates for filter position R
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h;
bool pred = (h >= 0 && h < problem_size_.H);
masks_[s_idx][1] |= (pred << r);
}
}
// mask predicates for filter position S
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w;
bool pred = (w >= 0 && w < problem_size_.W);
masks_[s_idx][2] |= (pred << s);
}
}
if (filter_c_ >= problem_size.C) {
clear_mask();
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the activations tensor X that corresponds to
  /// output position nzpq and filter position (t, r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int z, int p, int q, int t, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
t = problem_size_.T - 1 - t;
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, d, h, w, filter_c_);
}
  /// Adds a pointer offset in units of bytes
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask_(bool clear) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
      // We are using inline PTX assembly here to avoid a CUDA C++ compilation
// artifact in which control flow instructions are generated. Instead, our
// intent is to predicate the mov instructions.
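      // The sequence below copies the current mask value into a register, sets
      // predicate p when `clear` is nonzero, conditionally zeroes the register under
      // p, and writes it back, so the mask is either preserved or cleared without a
      // branch.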
#if defined(__CUDA_ARCH__)
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][0])
:
"r"((int)clear),
"r"(masks_[s][0])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][1])
:
"r"((int)clear),
"r"(masks_[s][1])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][2])
:
"r"((int)clear),
"r"(masks_[s][2])
);
#else
if (clear) {
masks_[s][0] = 0;
masks_[s][1] = 0;
masks_[s][2] = 0;
}
#endif
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
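  /// Advances to the next filter position: next_idx selects the precomputed pointer
  /// increment in params_.inc_next (0: next s, 1: next r after wrapping s, 2: next t
  /// after wrapping r and s, 3: next channel block after wrapping t, r, and s).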
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
next_idx = 1;
if (filter_r_ == problem_size_.R) {
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
next_idx = 2;
}
else {
filter_t_ = 0;
next_idx = 3;
}
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 3) {
filter_c_ += params_.filter_c_delta;
}
clear_mask_(filter_c_ >= problem_size_.C);
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask() {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][0] = Mask(0);
masks_[s][1] = Mask(0);
masks_[s][2] = Mask(0);
}
}
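  /// Returns true if the current access is in bounds, i.e. the predicate bits for the
  /// current filter position (t, r, s) are all set for the current strided iteration.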
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][0] & (Index(1) << filter_t_)) &&
(masks_[iteration_strided_][1] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][2] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
// Conv3dFpropActivationTileAccessIteratorOptimized has constraint on filter positions
// due to the number of mask bits.
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h",
"repo_id": "include",
"token_count": 6154
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel
  with the activation's scale+bias+relu fused into the mainloop.
    The original implicit GEMM stores out-of-bound data as zeroes in shared memory:
  zeroes fed into the tensor cores produce zeroes out of the tensor cores, so the
  result remains the same. When scale+bias+relu is fused into the mainloop, this no
  longer holds because
  0 x scale + bias = bias
  which is not always 0. So, instead of storing zeroes, this fused
  kernel stores the out-of-bound data as a special NaN (0x7eff); when applying
  scale+bias+relu, the code behaves like
if (data == 0x7eff)
data = 0;
else
data = scale+bias+relu(data, scale, bias);
  See include/cutlass/conv/warp/scale_bias_relu_transform.h for the
  elementwise computation. See include/cutlass/arch/memory_sm80.h for NaN fill.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/scale_bias_tile_iterator.h"
#include "cutlass/conv/warp/scale_bias_relu_transform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Element type of scale and bias vectors
typename ElementScaleBias_,
/// Layout of scale and bias vectors
typename LayoutScaleBias_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorScaleBias_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class MmaFpropFusionBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Element type of scale and bias vectors
using ElementScaleBias = ElementScaleBias_;
/// Layout of scale and bias vectors
using LayoutScaleBias = LayoutScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorScaleBias = WarpIteratorScaleBias_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
  /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the scale and bias vectors
using TensorRefScaleBias = TensorRef<ElementScaleBias, LayoutScaleBias>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the A scale and bias vectors in shared memory
using ShapeScaleBias =
MatrixShape<1 + Policy::SmemPaddingA::kRow,
2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for A operand Scale and Bias
AlignedBuffer<ElementScaleBias, ShapeScaleBias::kCount> operand_A_scale_bias;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a layout object for the A scale and bias vectors
CUTLASS_DEVICE
static LayoutScaleBias LayoutScaleBias() {
return LayoutScaleBias::packed(
{ShapeScaleBias::kRow, ShapeScaleBias::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
/// Returns a TensorRef to the A operand Scale vector
CUTLASS_HOST_DEVICE
TensorRefScaleBias operand_A_scale_bias_ref() {
return TensorRefScaleBias{operand_A_scale_bias.data(), LayoutScaleBias()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of A operand scale and bias vector
/// from shared memory
WarpIteratorScaleBias warp_tile_iterator_A_scale_bias_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaFpropFusionBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_A_scale_bias_(
shared_storage.operand_A_scale_bias_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorScaleBias_,
/// Iterates over vectors of scale and bias vector in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorScaleBias_,
/// Cache operation for scale/bias operand
cutlass::arch::CacheOperation::Kind CacheOpScaleBias,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorScaleBias_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class ImplicitGemmFpropFusionMultistage
: public MmaFpropFusionBase<Shape_, typename IteratorScaleBias_::Element,
typename IteratorScaleBias_::Layout, Policy_,
WarpIteratorScaleBias_, Stages> {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorScaleBias = IteratorScaleBias_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorScaleBias = WarpIteratorScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< Base class
using Base = MmaFpropFusionBase<Shape_, typename IteratorScaleBias::Element,
typename IteratorScaleBias::Layout, Policy,
WarpIteratorScaleBias, Stages>;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using SmemIteratorScaleBias = SmemIteratorScaleBias_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static cutlass::arch::CacheOperation::Kind const kCacheOpScaleBias =
CacheOpScaleBias;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
    /// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
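    // Both counts use ceiling division so the cp.async instructions for one stage are
    // spread evenly across the kWarpGemmIterations warp-level MMA iterations of the
    // mainloop.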
};
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpLoadedFragmentScaleBias =
typename WarpIteratorScaleBias::Fragment;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory
SmemIteratorScaleBias smem_iterator_A_scale_bias_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmFpropFusionMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_A_scale_bias_(shared_storage.operand_A_scale_bias_ref(),
thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_A_scale_bias_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorScaleBias &iterator_A_scale_bias,
IteratorB &iterator_B, int group_start_A = 0,
int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
        // Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
}
// Async Copy for operand A scale and bias vector. Scale and bias vectors
// are small. One iteration is enough.
if (group_start_A == 0) {
typename IteratorScaleBias::AccessType *dst_ptr =
reinterpret_cast<typename IteratorScaleBias::AccessType *>(
this->smem_iterator_A_scale_bias_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorScaleBias::Element>::value *
IteratorScaleBias::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>(
dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid());
}
iterator_B.set_iteration_index(group_start_B);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< iterator over scale and bias vectors in global memory
IteratorScaleBias iterator_A_scale_bias,
///< initial value of accumulator
FragmentC const &src_accum,
///< number of iterations per channel
int gemm_k_iterations_per_channel = 0,
///< Imaginary strides used for planar-complex only - ignored here
int64_t imag_stride_A = 0,
int64_t imag_stride_B = 0) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
        // Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
// Async Copy for operand A scale and bias vectors. Scale and bias
// vectors are small. One iteration is enough.
{
typename IteratorScaleBias::AccessType *dst_ptr =
reinterpret_cast<typename IteratorScaleBias::AccessType *>(
this->smem_iterator_A_scale_bias_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorScaleBias::Element>::value *
IteratorScaleBias::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>(
dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid());
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.advance();
iterator_A_scale_bias.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpLoadedFragmentScaleBias warp_loaded_frag_A_scale_bias[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
cutlass::conv::warp::FpropScaleBiasReluTransform<WarpTransformedFragmentA,
WarpLoadedFragmentScaleBias>
elementwise_transform;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_A_scale_bias_.load(
warp_loaded_frag_A_scale_bias[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_bias_;
++this->warp_tile_iterator_B_;
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
elementwise_transform(warp_transformed_frag_A[0],
warp_loaded_frag_A_scale_bias[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_scale_bias_.load(
warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_bias_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0) {
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_loaded_frag_A_scale_bias[warp_mma_k % 2]);
}
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
elementwise_transform(
warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.advance();
iterator_A_scale_bias.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_A_scale_bias_.add_tile_offset(
{0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_A_scale_bias_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
}
}
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h/0 | {
"file_path": "include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h",
"repo_id": "include",
"token_count": 12230
} | 25 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Helper macros for the CUTLASS library
*/
#pragma once
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef CUTLASS_NAMESPACE
#define concat_tok(a, b) a ## b
#define mkcutlassnamespace(pre, ns) concat_tok(pre, ns)
#define cutlass mkcutlassnamespace(cutlass_, CUTLASS_NAMESPACE)
#endif
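// For example (illustrative only), building with -DCUTLASS_NAMESPACE=mycompany makes
// every subsequent use of the identifier `cutlass` expand to `cutlass_mycompany`,
// isolating this build's symbols from another CUTLASS build in the same binary.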
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_HOST_DEVICE __forceinline__ __device__ __host__
#define CUTLASS_DEVICE __forceinline__ __device__
#elif defined(__CUDACC_RTC__)
#define CUTLASS_HOST_DEVICE __forceinline__ __device__
#define CUTLASS_DEVICE __forceinline__ __device__
#else
#define CUTLASS_HOST_DEVICE inline
#define CUTLASS_DEVICE inline
#endif
#define CUTLASS_HOST __host__
#define CUTLASS_GLOBAL __global__ static
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename T>
CUTLASS_HOST_DEVICE void __CUTLASS_UNUSED(T const &)
{ }
#if defined(__GNUC__)
#define CUTLASS_UNUSED(expr) __CUTLASS_UNUSED(expr)
#else
#define CUTLASS_UNUSED(expr) do { ; } while (&expr != &expr)
#endif
#ifdef _MSC_VER
// Provides support for alternative operators 'and', 'or', and 'not'
#include <iso646.h>
#endif // _MSC_VER
#if !defined(__CUDACC_RTC__)
#include <assert.h>
#endif
#if defined(__CUDA_ARCH__)
#if defined(_MSC_VER)
#define CUTLASS_NOT_IMPLEMENTED() { printf("%s not implemented\n", __FUNCSIG__); asm volatile ("brkpt;\n"); }
#else
#define CUTLASS_NOT_IMPLEMENTED() { printf("%s not implemented\n", __PRETTY_FUNCTION__); asm volatile ("brkpt;\n"); }
#endif
#else
#if defined(_MSC_VER)
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __FUNCSIG__)
#else
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __PRETTY_FUNCTION__)
#endif
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
#ifndef CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
#define CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED 0
#endif
// CUDA 10.1 introduces the mma instruction
#if !defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
#define CUTLASS_ENABLE_TENSOR_CORE_MMA 0
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#define CUTLASS_ASSERT(x) assert(x)
////////////////////////////////////////////////////////////////////////////////////////////////////
// CUTLASS_PRAGMA_(UNROLL|NO_UNROLL) optimization directives for the CUDA compiler.
#if defined(__CUDA_ARCH__) && !defined(__INTELLISENSE__)
#if defined(__CUDACC_RTC__) || (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_PRAGMA_UNROLL _Pragma("unroll")
#define CUTLASS_PRAGMA_NO_UNROLL _Pragma("unroll 1")
#else
#define CUTLASS_PRAGMA_UNROLL #pragma unroll
#define CUTLASS_PRAGMA_NO_UNROLL #pragma unroll 1
#endif
#define CUTLASS_GEMM_LOOP CUTLASS_PRAGMA_NO_UNROLL
#else
#define CUTLASS_PRAGMA_UNROLL
#define CUTLASS_PRAGMA_NO_UNROLL
#define CUTLASS_GEMM_LOOP
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
#define CUTLASS_THREAD_LOCAL thread_local
#else
#define CUTLASS_THREAD_LOCAL
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(_MSVC_LANG)
# define CUTLASS_CPLUSPLUS _MSVC_LANG
#else
# define CUTLASS_CPLUSPLUS __cplusplus
#endif
#if (201700L <= CUTLASS_CPLUSPLUS)
#define CUTLASS_CONSTEXPR_IF_CXX17 constexpr
#define CUTLASS_CXX17_OR_LATER 1
#else
#define CUTLASS_CONSTEXPR_IF_CXX17
#define CUTLASS_CXX17_OR_LATER 0
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
}; // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/detail/helper_macros.hpp/0 | {
"file_path": "include/cutlass/detail/helper_macros.hpp",
"repo_id": "include",
"token_count": 1854
} | 26 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cutlass/numeric_conversion.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Fusion Operations
// Template args must not be implementation dependent
//
/////////////////////////////////////////////////////////////////////////////////////////////////
struct FusionOperation {
  // metadata types/queries that can be overridden
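  // Derived fusion operations override these aliases and flags to advertise which
  // operands they consume and produce; they are intended to be inspected at compile
  // time when selecting a matching epilogue implementation.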
using ElementOutput = void;
using ElementCompute = void;
using ElementSource = void;
static constexpr bool IsSourceSupported = false;
using ElementScalar = void;
static constexpr int AlignmentScalar = 0;
static constexpr bool IsScaleFactorSupported = false;
static constexpr bool IsPerRowScaleSupported = false;
using ElementBias = void;
static constexpr int AlignmentBias = 0;
static constexpr bool IsPerRowBiasSupported = false;
static constexpr bool IsDePerRowBiasSupported = false;
using ActivationFn = void;
static constexpr bool IsEltActSupported = false;
static constexpr bool IsDeEltActSupported = false;
using ElementAux = void;
using GmemLayoutTagAux = void;
static constexpr int AlignmentAux = 0;
static constexpr bool IsAuxOutSupported = false;
static constexpr bool IsAuxInSupported = false;
using ElementAmax = void;
static constexpr bool IsAbsMaxSupported = false;
};
// D = alpha * acc
template<
class ElementOutput_,
class ElementCompute_,
class ElementScalar_ = ElementCompute_,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct ScaledAcc : FusionOperation {
using ElementOutput = ElementOutput_;
using ElementCompute = ElementCompute_;
using ElementScalar = ElementScalar_;
static constexpr int AlignmentScalar = 1;
static constexpr auto RoundStyle = RoundStyle_;
};
// D = alpha * acc + beta * C
template<
class ElementOutput_,
class ElementCompute_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinearCombination
: ScaledAcc<ElementOutput_, ElementCompute_, ElementScalar_, RoundStyle_> {
using ElementSource = ElementSource_;
static constexpr bool IsSourceSupported = true;
};
// D = activation(alpha * acc + beta * C)
template<
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinCombEltAct
: LinearCombination<ElementOutput_, ElementCompute_, ElementSource_, ElementScalar_, RoundStyle_> {
using ActivationFn = ActivationFn_<ElementCompute_>;
static constexpr bool IsEltActSupported = true;
};
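// Example (illustrative only): an epilogue computing D = ReLU(alpha * acc + beta * C)
// with half-precision output and float compute could be named as
//   using EpilogueOp = cutlass::epilogue::fusion::LinCombEltAct<
//       cutlass::epilogue::thread::ReLu, cutlass::half_t, float>;
// assuming the ReLu activation functor from cutlass/epilogue/thread/activation.h.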
// D = alpha * acc + beta * C + per-row bias
template<
class ElementOutput_,
class ElementCompute_,
class ElementBias_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinCombPerRowBias
: LinearCombination<ElementOutput_, ElementCompute_, ElementSource_, ElementScalar_, RoundStyle_> {
using ElementBias = ElementBias_;
static constexpr int AlignmentBias = AlignmentBias_;
static constexpr bool IsPerRowBiasSupported = true;
};
// D = activation(alpha * acc + beta * C + per-row bias)
template<
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementBias_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinCombPerRowBiasEltAct
: LinCombPerRowBias<ElementOutput_, ElementCompute_,
ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> {
using ActivationFn = ActivationFn_<ElementCompute_>;
static constexpr bool IsEltActSupported = true;
};
// D = activation(alpha * acc + beta * C + per-row bias)
// aux = alpha * acc + beta * C + per-row bias
template<
class GmemLayoutTagAux_,
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementAux_ = ElementOutput_,
class ElementBias_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>,
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinCombPerRowBiasEltActAux
: LinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_,
ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> {
using ElementAux = ElementAux_;
using GmemLayoutTagAux = GmemLayoutTagAux_;
static constexpr int AlignmentAux = AlignmentAux_;
static constexpr bool IsAuxOutSupported = true;
};
// D = activation(per-row alpha * acc + per-row beta * C + per-row bias)
template<
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementBias_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_, // per-row alpha/beta
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
int AlignmentScalar_ = 128 / sizeof_bits_v<ElementScalar_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct PerRowLinCombPerRowBiasEltAct
: LinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_,
ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> {
static constexpr int AlignmentScalar = AlignmentScalar_;
static constexpr bool IsPerRowScaleSupported = true;
};
// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias
// if D is fp8
// D = scale_d * activation(Z)
// else
// D = activation(Z)
template<
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementBias_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct ScaledLinCombPerRowBiasEltAct
: LinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_,
ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> {
static constexpr bool IsScaleFactorSupported = true;
};
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
// if D is fp8
// amax_d = max(abs(elements in activation(Z)))
// D = scale_d * activation(Z)
// else
// D = activation(Z)
// if Aux is fp8
// amax_aux = max(abs(elements in Z))
// Aux = scale_aux * Z
// else
// Aux = Z
template<
class GmemLayoutTagAux_,
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementAux_ = ElementOutput_,
class ElementAmax_ = ElementCompute_,
class ElementBias_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>,
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct ScaledLinCombPerRowBiasEltActAmaxAux
: ScaledLinCombPerRowBiasEltAct<ActivationFn_, ElementOutput_, ElementCompute_,
ElementBias_, ElementSource_, ElementScalar_, AlignmentBias_, RoundStyle_> {
using ElementAmax = ElementAmax_;
static constexpr bool IsAbsMaxSupported = true;
using ElementAux = ElementAux_;
using GmemLayoutTagAux = GmemLayoutTagAux_;
static constexpr int AlignmentAux = AlignmentAux_;
static constexpr bool IsAuxOutSupported = true;
};
// Z = Aux
// dY = alpha * acc + beta * C
// D = d_activation(dY, Z)
template<
class GmemLayoutTagAux_,
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementAux_ = ElementOutput_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinCombDeEltAct
: LinearCombination<ElementOutput_, ElementCompute_, ElementSource_, ElementScalar_, RoundStyle_> {
using ActivationFn = ActivationFn_<ElementCompute_>;
static constexpr bool IsDeEltActSupported = true;
using ElementAux = ElementAux_;
using GmemLayoutTagAux = GmemLayoutTagAux_;
static constexpr int AlignmentAux = AlignmentAux_;
static constexpr bool IsAuxInSupported = true;
};
// Z = Aux
// dY = alpha * acc + beta * C
// D = d_activation(dY, Z)
// dBias = sum of columns of D
template<
class GmemLayoutTagAux_,
template <class> class ActivationFn_,
class ElementOutput_,
class ElementCompute_,
class ElementAux_ = ElementOutput_,
class ElementBias_ = ElementCompute_,
class ElementSource_ = ElementOutput_,
class ElementScalar_ = ElementCompute_,
int AlignmentAux_ = 128 / sizeof_bits_v<ElementAux_>,
int AlignmentBias_ = 128 / sizeof_bits_v<ElementBias_>,
FloatRoundStyle RoundStyle_ = FloatRoundStyle::round_to_nearest
>
struct LinCombDeEltActDePerRowBias
: LinCombDeEltAct<GmemLayoutTagAux_, ActivationFn_, ElementOutput_, ElementCompute_,
ElementAux_, ElementSource_, ElementScalar_, AlignmentAux_, RoundStyle_> {
using ElementBias = ElementBias_;
static constexpr int AlignmentBias = AlignmentBias_;
static constexpr bool IsDePerRowBiasSupported = true;
};
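// Illustrative sketch (not part of the original header): selecting one of the fusion operations
// above and checking its metadata at compile time. The activation functor is a hypothetical
// stand-in defined only for this sketch; real kernels would pass a functor from
// cutlass/epilogue/thread/activation.h (e.g. ReLu) instead.
namespace fusion_operations_sketch {

template <class T>
struct IdentityActivation {
  T operator()(T const& value) const { return value; }
};

// D = activation(alpha * acc + beta * C), with fp32 output/compute for simplicity.
using SketchFusionOp = LinCombEltAct<IdentityActivation, float, float>;

static_assert(SketchFusionOp::IsSourceSupported, "the beta * C source term is available");
static_assert(SketchFusionOp::IsEltActSupported, "elementwise activation is enabled");
static_assert(!SketchFusionOp::IsAuxOutSupported, "no auxiliary output tensor in this operation");

} // namespace fusion_operations_sketch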
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/operations.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/operations.hpp",
"repo_id": "include",
"token_count": 3682
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class Activation, class = void>
struct GenericActivationTraits {
static constexpr bool IsArgumentsNeeded = false;
struct Arguments {};
};
template <class Activation>
struct GenericActivationTraits<Activation, decltype(typename Activation::Arguments(), void())> {
static constexpr bool IsArgumentsNeeded = true;
using Arguments = typename Activation::Arguments;
};
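// Illustrative sketch (not part of the original header): how the detection idiom above behaves.
// Both functors are hypothetical stand-ins used only for these compile-time checks.
namespace activation_traits_sketch {

template <class T>
struct PlainActivation {
  T operator()(T const& value) const { return value; }
};

template <class T>
struct ParameterizedActivation {
  struct Arguments {
    T threshold;
  };
  T operator()(T const& value, Arguments const&) const { return value; }
};

static_assert(!GenericActivationTraits<PlainActivation<float>>::IsArgumentsNeeded,
              "no nested Arguments type, so no extra arguments are forwarded");
static_assert(GenericActivationTraits<ParameterizedActivation<float>>::IsArgumentsNeeded,
              "a nested Arguments type enables argument forwarding");

} // namespace activation_traits_sketch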
template <typename T>
struct LinearCombinationGenericParams {
T alpha; ///< scales accumulators
T beta; ///< scales source tensor
T const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
T const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
LinearCombinationGenericParams():
alpha(T(1)),
beta(T(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
LinearCombinationGenericParams(
T alpha,
T beta = T(0)
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
LinearCombinationGenericParams(
T const *alpha_ptr,
T const *beta_ptr = nullptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator followed by an activation function to an array of elements.
///
/// D = activation(alpha * accumulator + beta * source + uniform)
///
template <
template<typename T> class ActivationFunctor,
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually this is 128 / sizeof_bits<ElementOutput_>::value,
///< but 64 or 32 are sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
bool IsHeavy = false
>
class LinearCombinationGeneric {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static bool const kIsHeavy = IsHeavy;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params
: LinearCombinationGenericParams<ElementCompute>,
GenericActivationTraits<ActivationFunctor<ElementCompute>>::Arguments {
using LinearCombinationGenericParams<ElementCompute>::LinearCombinationGenericParams;
};
private:
//
// Data members
//
Params params_;
bool skip_elementwise_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationGeneric(Params const ¶ms) {
params_ = params;
params_.alpha = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
params_.beta = (params.beta_ptr ? *params.beta_ptr : params.beta);
skip_elementwise_ = false;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return params_.beta != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
params_.beta = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
// Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ActivationFunctor<FragmentCompute> activation;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(params_.beta, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
}
if constexpr (GenericActivationTraits<ActivationFunctor<ElementCompute>>::IsArgumentsNeeded) {
intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_);
} else {
intermediate = skip_elementwise_ ? intermediate : activation(intermediate);
}
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
// Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_accumulator;
ActivationFunctor<FragmentCompute> activation;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_accumulator(params_.alpha, converted_accumulator); // D = alpha * Accum
}
if constexpr (GenericActivationTraits<ActivationFunctor<FragmentCompute>>::IsArgumentsNeeded) {
intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_);
} else {
intermediate = skip_elementwise_ ? intermediate : activation(intermediate);
}
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
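// Illustrative sketch (not part of the original header): a plausible instantiation of the functor
// above. The pass-through activation is a hypothetical stand-in; production code would pick an
// activation from cutlass/epilogue/thread/activation.h and typically derive Count from
// 128 / sizeof_bits<ElementOutput>::value.
namespace linear_combination_generic_sketch {

template <class T>
struct PassThroughActivation {
  T operator()(T const& value) const { return value; }
};

// fp16 output, fp32 accumulation and compute, 8 outputs per 128-bit access.
using SketchEpilogueOp = LinearCombinationGeneric<
    PassThroughActivation,
    cutlass::half_t,
    8,
    float,
    float>;

static_assert(SketchEpilogueOp::kCount == 8, "eight half_t elements per access");
static_assert(!GenericActivationTraits<PassThroughActivation<float>>::IsArgumentsNeeded,
              "a pass-through activation needs no extra arguments");

// Host-side parameter construction (alpha = 1, beta defaults to 0):
//   SketchEpilogueOp::Params params(1.0f);
//   SketchEpilogueOp op(params); // constructed per-thread inside the epilogue

} // namespace linear_combination_generic_sketch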
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/thread/linear_combination_generic.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_generic.h",
"repo_id": "include",
"token_count": 3155
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
The shared memory resource is time-sliced across warps.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/epilogue_base_streamk.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value)
>
class Epilogue :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>,
public EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>
{
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
using BaseStreamK = EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// Number of warps per block
using WarpCount = typename Base::WarpCount;
/// Number of threads per block
static int const kBlockThreads = 32 * WarpCount::kCount;
/// Per-thread accumulator tile type
using AccumulatorTile = typename Base::AccumulatorTile;
/// Numerical accumulation element type
using ElementAccumulator = typename WarpMmaOperator::ElementC;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Vector type used by the global output iterator
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Vector type used by the shared output iterator
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, "One of these must be exactly 1.");
public:
/// Aspect for when epilogue source is not needed
struct SourceAspectNotNeeded
{
/// Constructor
CUTLASS_DEVICE
SourceAspectNotNeeded()
{}
// No-op
CUTLASS_DEVICE
void load() { }
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &aligned_accum_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i]);
}
}
};
/// Aspect for when epilogue source is needed
struct SourceAspectNeeded
{
OutputTileIterator source_iterator;
typename OutputTileIterator::Fragment source_fragment;
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
static void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]);
}
}
/// Constructor
CUTLASS_DEVICE
SourceAspectNeeded(OutputTileIterator source_iterator) :
source_iterator(source_iterator)
{
source_fragment.clear();
}
// Load addend source fragment from global memory
CUTLASS_DEVICE
void load() {
source_iterator.load(source_fragment);
++source_iterator;
}
/// Invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &aligned_accum_fragment)
{
apply_output_operator(output_fragment, output_op, aligned_accum_fragment, source_fragment);
}
};
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index in the threadblock
int thread_idx;
/// Warp index in the threadblock
int warp_idx;
public:
/// Constructor
CUTLASS_DEVICE
Epilogue(
typename Base::SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx) ///< Id of thread within warp
:
Base(shared_storage, thread_idx, warp_idx, lane_idx),
BaseStreamK(thread_idx),
shared_load_iterator_(shared_storage.reference(), thread_idx),
thread_idx(thread_idx),
warp_idx(warp_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
OutputTileIterator source_iterator) ///< Tile iterator for addend source
{
// Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Store fragment to shared memory
this->warp_tile_iterator_.store(accum_fragment);
__syncthreads();
// Initialize/load source-fragment data
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment;
shared_load_iterator_.load(aligned_accum_fragment);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
typename SharedLoadIterator::Fragment aligned_addend_fragment;
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_addend_fragment);
aligned_accum_fragment = add_fragments(aligned_accum_fragment, aligned_addend_fragment);
}
}
// Compute the output result
typename OutputTileIterator::Fragment output_fragment;
// Apply the output operator
SourceAspectNeeded::apply_output_operator(
output_fragment,
output_op,
aligned_accum_fragment,
source_fragment);
// Store the final result
destination_iterator += reduce_fragment_idx;
destination_iterator.store(output_fragment);
}
/// Perform the epilogue computations and stream the result to global memory.
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators) ///< Complete warp-level accumulator tile
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded());
}
/// Perform the epilogue computations and stream the result to global memory. Implements
/// two alternative codepaths, depending on whether the output op requires addend data to be loaded.
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for addend source
{
if (output_op.is_source_needed())
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator));
}
else
{
operator()(output_op, destination_iterator, accumulators, SourceAspectNotNeeded());
}
}
/// Perform the epilogue computations and stream the result to global memory. Implements a
/// single codepath, regardless of whether the output op requires addend data to be loaded
CUTLASS_DEVICE
void unified(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for addend source
{
if (!output_op.is_source_needed())
{
source_iterator.clear_mask();
__syncthreads(); // Dummy (CUDA 11.0)
}
operator()(output_op, destination_iterator, accumulators, SourceAspectNeeded(source_iterator));
}
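/// Helper that copies one accumulator fragment to shared memory. push(pos, ...) dispatches the
/// run-time step index over the compile-time sequence Seq so the fragment iterator can be
/// advanced by a compile-time constant before the selected fragment is loaded and stored.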
template<class Seq>
struct acc2smem;
template <size_t... Seq>
struct acc2smem<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
/// Streams the result to global memory
template <typename SourceAspect>
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
SourceAspect source)
{
// Iterator over warp-level accumulator fragment
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter)
{
//
// Load the source
//
source.load();
//
// Convert and store fragment
//
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
source.apply_output_operator(output_fragment, output_op, aligned_accum_fragment[0]);
//
// Store the final result
//
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
};
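// Illustrative sketch (not part of the original header): the call pattern a threadblock-scoped
// GEMM kernel typically follows once this template is fully specialized. The Epilogue_ alias and
// the iterator/operator objects below are assumptions for illustration; concrete types normally
// come from the default epilogue traits (e.g. DefaultEpilogueTensorOp).
//
//   __shared__ typename Epilogue_::SharedStorage epilogue_storage;
//   Epilogue_ epilogue(epilogue_storage, thread_idx, warp_idx, lane_idx);
//
//   // Dispatches internally between SourceAspectNeeded and SourceAspectNotNeeded
//   // depending on output_op.is_source_needed():
//   epilogue(output_op, destination_iterator, accumulators, source_iterator);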
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue.h",
"repo_id": "include",
"token_count": 6815
} | 29 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Visitor tree operation base implementation to enable composable fusions
for the CUTLASS 2x epilogue
*/
#pragma once
#include "cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::threadblock {
using namespace cute;
using cute::tuple;
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <class... Ops>
struct VisitorImpl2x: fusion::detail::Sm90VisitorImplBase<Ops...> {
using fusion::detail::Sm90VisitorImplBase<Ops...>::Sm90VisitorImplBase;
using fusion::detail::Sm90VisitorImplBase<Ops...>::ops;
template <class CallbacksTuple>
struct Callbacks {
// Callbacks can store non-persistent variables (e.g. tensors) or copies of persistent variables
CallbacksTuple callbacks_tuple;
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE void
begin_epilogue() {
for_each(callbacks_tuple,
[] (auto& callbacks) {
callbacks.begin_epilogue();
}
);
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE void
begin_step(int step_idx) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.begin_step(step_idx);
}
);
}
/// Called at the start of a row
CUTLASS_DEVICE void
begin_row(int row_idx) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.begin_row(row_idx);
}
);
}
/// Called after accumulators have been exchanged for each accumulator vector
template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize>
CUTLASS_DEVICE auto // returns an Array
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc,
Array<ElementInputs, FragmentSize> const&... frg_inputs) // depends on the arity of the op
= delete; // Must be implemented for each operation
/// Called at the end of a row
CUTLASS_DEVICE void
end_row(int row_idx) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.end_row(row_idx);
}
);
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE void
end_step(int step_idx) {
for_each(callbacks_tuple,
[&] (auto& callbacks) {
callbacks.end_step(step_idx);
}
);
}
/// Called after all steps have been completed
CUTLASS_DEVICE void
end_epilogue() {
for_each(callbacks_tuple,
[] (auto& callbacks) {
callbacks.end_epilogue();
}
);
}
};
// Callbacks factory
// All operations must redefine this
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
return transform_apply(ops,
[&] (auto& op) {
return op.get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape);
},
[] (auto&&... callbacks) {
auto callbacks_tuple = cute::make_tuple(callbacks...);
return Callbacks<decltype(callbacks_tuple)>{callbacks_tuple};
}
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Convenience aliases
using EmptyCallbacks = VisitorImpl2x<>::Callbacks<cute::tuple<>>;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
using namespace detail;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Tree visitor
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class NodeOp, class... ChildOps>
struct TreeVisitor2x : VisitorImpl2x<ChildOps..., NodeOp> {
using VisitorImpl2x<ChildOps..., NodeOp>::VisitorImpl2x;
template<class CallbacksImpl>
struct Callbacks : CallbacksImpl {
CUTLASS_DEVICE
Callbacks(CallbacksImpl&& impl)
: CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
using CallbacksImpl::callbacks_tuple;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc) {
constexpr int Rm1 = sizeof...(ChildOps);
return cute::detail::tapply(callbacks_tuple,
[&] (auto& child_callbacks) {
return child_callbacks.visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc);
},
[&] (auto&&... frg_inputs) {
return get<Rm1>(callbacks_tuple).visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc, frg_inputs...);
},
make_seq<Rm1>{}
);
}
};
// Callbacks factory
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
return Callbacks<
decltype(VisitorImpl2x<ChildOps..., NodeOp>::
get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
))>(
VisitorImpl2x<ChildOps..., NodeOp>::
get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
)
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template<
class ElementCompute,
class EdgeTuple,
class... Ops
>
struct TopologicalVisitor2x : VisitorImpl2x<Ops...> {
static_assert(is_static_v<EdgeTuple>);
static_assert(cute::rank(EdgeTuple{}) == sizeof...(Ops));
static_assert(sizeof...(Ops) > 1);
using VisitorImpl2x<Ops...>::VisitorImpl2x;
template<class CallbacksImpl>
struct Callbacks : CallbacksImpl {
CUTLASS_DEVICE
Callbacks(CallbacksImpl&& impl)
: CallbacksImpl(cute::forward<CallbacksImpl>(impl)) {}
using CallbacksImpl::callbacks_tuple;
template <typename ElementAccumulator, int FragmentSize>
CUTLASS_DEVICE auto
visit(int iter_idx, int row_idx, int column_idx, int frg_idx,
Array<ElementAccumulator, FragmentSize> const& frg_acc) {
constexpr int Rm1 = sizeof...(Ops) - 1;
auto frg_compute_tuple = cute::repeat<Rm1>(Array<ElementCompute, FragmentSize>{});
return cute::detail::tapply(EdgeTuple{}, callbacks_tuple, frg_compute_tuple,
// Visit the first R-1 ops in topological order
[&] (auto&& edge_seq, auto& callbacks, auto& frg_compute) {
frg_compute = cute::detail::apply(frg_compute_tuple,
// Compute the current op with children inputs
[&] (auto const&... frg_inputs) {
auto frg_output = callbacks.visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc, frg_inputs...);
using ElementOutput = typename decltype(frg_output)::Element;
using ConvertOutput = NumericArrayConverter<ElementCompute, ElementOutput, FragmentSize>;
ConvertOutput convert_output{};
return convert_output(frg_output);
},
// Get inputs in the sequence given by the children indices of the current op
edge_seq
);
return frg_compute;
},
// Visit the last op
[&] (auto const&...ops) {
return cute::detail::apply(frg_compute_tuple,
// Compute the last op with children inputs
[&] (auto const&... frg_inputs) {
return get<Rm1>(callbacks_tuple).visit(iter_idx, row_idx, column_idx, frg_idx, frg_acc, frg_inputs...);
},
// Get inputs in the sequence given by the children indices of the last op
get<Rm1>(EdgeTuple{})
);
},
// Transform to visit R-1 ops, apply to visit last op
make_seq<Rm1>{}
);
}
};
// Callbacks factory
template <class ProblemShape>
CUTLASS_DEVICE auto
get_callbacks(
gemm::GemmCoord threadblock_tile_offset,
int thread_idx,
ProblemShape problem_shape
) {
return Callbacks<decltype(
VisitorImpl2x<Ops...>::
get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
))>(
VisitorImpl2x<Ops...>::
get_callbacks(
threadblock_tile_offset,
thread_idx,
problem_shape
)
);
}
};
template <class NodeOp, class... ChildOps>
using Sm80EVT = TreeVisitor2x<NodeOp, ChildOps...>;
template<
class ElementCompute,
class EdgeTuple,
class... Ops
>
using Sm80TopologicalVisitor = TopologicalVisitor2x<ElementCompute, EdgeTuple, Ops...>;
using X = Underscore;
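// Illustrative sketch (not part of the original header): how a fusion tree is spelled with the
// aliases above. For D = activation(alpha * acc + beta * C), the root node is the activation
// compute, its child is a multiply-add compute, and the leaves broadcast the scalars, fetch the
// accumulators, and load C. The node type names below are hypothetical placeholders; the real
// visitor nodes live in the companion visitor_load / visitor_compute / visitor_store headers.
//
//   using AlphaAcc   = Sm80EVT<ComputeMultiply,     // node op: alpha * acc
//                              BroadcastAlpha,      // child 0
//                              AccFetch>;           // child 1
//   using LinComb    = Sm80EVT<ComputeMultiplyAdd,  // node op: beta * C + (alpha * acc)
//                              BroadcastBeta,       // child 0
//                              LoadC,               // child 1
//                              AlphaAcc>;           // child 2
//   using FusionTree = Sm80EVT<ComputeActivation, LinComb>;
//
// Children are visited first; their output fragments become the trailing frg_inputs... passed to
// the node op's visit(). Sm80TopologicalVisitor generalizes this to a DAG: EdgeTuple lists, for
// each op, the indices of the ops whose outputs feed it, and the last op in the parameter pack
// produces the final result.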
/////////////////////////////////////////////////////////////////////////////////////////////////
// OutputTileThreadLayout translates the CUTLASS 2.X OutputTileOptimalThreadMap into the cute layout
// used by the CUTLASS 3.X epilogue
template <
typename ThreadblockShape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
int Stages_=1
>
struct OutputTileThreadLayout: DefaultThreadMapTensorOp<
ThreadblockShape_,
WarpShape_,
ThreadblockShape_::kK/WarpShape_::kK,
Element_,
ElementsPerAccess>::Type {
using Base = typename DefaultThreadMapTensorOp<
ThreadblockShape_,
WarpShape_,
ThreadblockShape_::kK/WarpShape_::kK,
Element_,
ElementsPerAccess>::Type;
using Base::Base;
// Software pipeline stages in epilogue
static_assert(Stages_ <= 2, "Sm80 EVT only supports up to 2 stages.");
static const int Stages = Stages_;
using ThreadShape = cute::Shape<
cute::Int<Base::Detail::kAccessWidth>, // lane col idx
cute::Int<Base::Detail::kAccessRows>, // lane row idx
cute::Int<Base::Detail::kWarpsRemainingForRows>, // warp row idx
cute::Int<Base::Shape::kGroup>, // group idx
cute::Int<Base::Shape::kCluster> // cluster idx
>;
using Shape = typename Base::Shape;
using Count = typename Base::Count;
using ThreadMapShape = cute::Shape<
// Column
Int<Base::kElementsPerAccess>, // vector
Int<Base::Detail::kAccessWidth>, // lane_col_coord
Int<Base::Iterations::kColumn>, // iteration::column
// Row
Int<Base::Detail::kAccessRows>, // lane_row_coord
Int<Base::Iterations::kRow>, // iterations in row
Int<Base::Detail::kWarpsRemainingForRows>, // warp_row_coord
Int<Count::kRow>, // iteration::row
Int<Count::kGroup>, // iteration::group
Int<Shape::kGroup>, // group_coord
Int<Count::kCluster>, // iteration::cluster
Int<Shape::kCluster> // cluster_coord
>;
// The shape of CTA Tile
using CtaShapeMNL = cute::Shape<
Int<
Shape::kRow * Count::kRow *
Shape::kGroup * Count::kGroup *
Shape::kCluster * Count::kCluster
>,
Int<Shape::kColumn * Count::kColumn>,
_1
>;
static const int kElementsPerAccess = ElementsPerAccess;
//
// Methods
//
CUTLASS_DEVICE
static auto tid2coord(int thread_idx) {
return cute::idx2crd(thread_idx, ThreadShape{});
}
template <class TensorInput>
CUTLASS_DEVICE
static auto partition(TensorInput &&xT, int thread_idx, gemm::GemmCoord threadblock_tile_offset) {
// (BLK_M,BLK_N)
Tensor bCxT = local_tile(
xT, CtaShapeMNL{}, make_coord(_,_,_), Step<_1,_1, X>{}
)(_,_,threadblock_tile_offset.m(),threadblock_tile_offset.n(),threadblock_tile_offset.k());
auto [lane_col_coord, lane_row_coord, warp_row_coord, group_coord, cluster_coord] = tid2coord(thread_idx);
// transform to column-major
Tensor bCxT_nm = make_tensor(
std::forward<decltype(bCxT)>(bCxT).data(), make_layout(get<1>(bCxT.layout()), get<0>(bCxT.layout()))
).compose(make_layout(ThreadMapShape{}));
// VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER
return bCxT_nm(_,lane_col_coord,_,lane_row_coord,_,warp_row_coord,_,_,group_coord,_,cluster_coord);
}
};
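// Illustrative sketch (not part of the original header): a plausible instantiation for a
// 128x128x32 threadblock tile with 64x64x32 warps and fp16 output; the shapes below are
// assumptions chosen only for illustration.
//
//   using ThreadMap = OutputTileThreadLayout<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // ThreadblockShape
//       cutlass::gemm::GemmShape<64, 64, 32>,     // WarpShape
//       cutlass::half_t,                          // Element
//       8>;                                       // ElementsPerAccess
//
// ThreadMap::partition(gmem_tensor, thread_idx, threadblock_tile_offset) slices the CTA tile
// (CtaShapeMNL) into the per-thread
// (VECTOR, FRAGMENT_COLUMN, FRAGMENT_ROW, ITERATION_ROW, ITERATION_GROUP, ITERATION_CLUSTER)
// view consumed by the visitor callbacks.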
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::threadblock
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/fusion/visitor_2x.hpp/0 | {
"file_path": "include/cutlass/epilogue/threadblock/fusion/visitor_2x.hpp",
"repo_id": "include",
"token_count": 5548
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Element data type
>
class PredicatedTileIteratorStridedDgrad {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
/// Convolution problem size
cutlass::conv::Conv2dProblemSize problem_size;
int tiled_rows_per_filter;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout, cutlass::conv::Conv2dProblemSize problem_size_, int threadblock_row):
problem_size(problem_size_),
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_row);
tiled_rows_per_filter = tile_m_per_filter * threadblock_row;
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Starting Dx h and w dimension for strided dgrad mapping
int start_h_, start_w_;
/// Effective Dy P and Q dimensions for strided dgrad mapping
int p_, q_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column position (assuming steady-state predicates have been computed)
Index thread_start_column_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorStridedDgrad(
Params const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
int r = start_r;
int s = start_s;
if (params_.problem_size.mode == cutlass::conv::Mode::kConvolution) {
r = (params_.problem_size.R - 1 - r);
s = (params_.problem_size.S - 1 - s);
}
// compute starting coordinates in Dx start_h_ and start_w_
strided_dgrad_starting_coords(
params_.problem_size,
stride_h_divmod, stride_w_divmod,
r, s,
start_h_, start_w_);
p_ = (params_.problem_size.H - start_h_ + params_.problem_size.stride_h - 1) / params_.problem_size.stride_h;
q_ = (params_.problem_size.W - start_w_ + params_.problem_size.stride_w - 1) / params_.problem_size.stride_w;
extent_row_ = extent.row();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer);
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
// remapping rows to find the mapped_row_offset
int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter;
// (STEP 4.a) [order NHW rows to be loaded and stored in output Dx NHWxC layout]
int n = npq_offset / (p_ * q_);
int residual = npq_offset % (p_ * q_);
int p = residual / q_;
int q = residual % q_;
int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) +
(start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W +
(start_w_ + q * params_.problem_size.stride_w);
bool row_guard = mapped_row_offset < extent_row_;
int64_t row_byte_offset = mapped_row_offset * params_.stride;
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8);
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset),
guard);
}
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
// remapping rows to find the mapped_row_offset
int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter;
// (STEP 4.a) [order NHW rows to be loaded and stored in output Dx NHWxC layout]
int n = npq_offset / (p_ * q_);
int residual = npq_offset % (p_ * q_);
int p = residual / q_;
int q = residual % q_;
int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) +
(start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W +
(start_w_ + q * params_.problem_size.stride_w);
bool row_guard = mapped_row_offset < extent_row_;
int64_t row_byte_offset = mapped_row_offset * params_.stride;
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8);
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_store<AccessType, sizeof(AccessType) >(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset),
guard);
}
}
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_byte_offset(frag, 0);
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorStridedDgrad &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Sets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h",
"repo_id": "include",
"token_count": 5884
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Warp-level tile iterator for reading and writing WMMA accumulator tiles to shared memory in the epilogue.
*/
#pragma once
#if !(defined(__clang__) && defined(__CUDA__))
#include "cutlass/cutlass.h"
#include "cutlass/wmma_array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/warp/wmma_tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorFragment, ///< wmma fragment to be written (concept: nvcuda::wmma::fragment)
typename Layout ///< target shared memory layout
>
class TileIteratorWmmaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorFragment_ ///< wmma fragment to be written (concept: nvcuda::wmma::fragment)
>
class TileIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorFragment_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorFragment = OperatorFragment_;
using Layout = layout::RowMajor;
//
// Derived types
//
using WmmaDataType = typename OperatorFragment::element_type;
  using Element = typename cutlass::arch::WmmaToCutlassDataType<WmmaDataType>::Type;        ///< Data type of element stored in nvcuda::wmma::fragment
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = WmmaFragmentArray<OperatorFragment, Policy::OperatorCount::kColumn * Policy::kWmmaFragmentsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Padding quantity
  // (Epilogue shared memory padding for WMMA Gemm kernel is set to run optimally on Turing)
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess
>;
private:
/// Storage type for accessing memory
//using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to shared memory
TensorRef ref_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp(): ref_(nullptr) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp(
TensorRef const &ref,
unsigned lane_id
): ref_(ref) {
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp & add_pointer_offset(Index pointer_offset) {
ref_.add_pointer_offset(pointer_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset({tile_offset.row() * OperatorShape::kM, tile_offset.column() * WarpShape::kN});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
for(int n=0; n < Policy::OperatorCount::kColumn; n++) {
WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset);
nvcuda::wmma::store_matrix_sync(
ptr,
frag[n],
ref_.stride()[0],
nvcuda::wmma::layout_t::mem_row_major
);
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
for(int n=0; n < Policy::OperatorCount::kColumn; n++) {
WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset);
nvcuda::wmma::load_matrix_sync(
frag[n],
ptr,
ref_.stride()[0],
nvcuda::wmma::layout_t::mem_row_major
);
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
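// Usage sketch (illustrative only, not part of this header): an epilogue would typically
// construct the iterator over a shared-memory TensorRef, advance it by whole warp tiles, and
// then store a fragment of WMMA accumulators. The WarpShape/OperatorShape/OperatorFragment
// names below are placeholders chosen for illustration:
//
//   TileIteratorWmmaTensorOp<WarpShape, OperatorShape, OperatorFragment, layout::RowMajor>
//       iterator(smem_ref, lane_id);
//   iterator.add_tile_offset({warp_m, warp_n});
//   iterator.store(accumulator_fragment);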
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif // !(defined(__clang__) && defined(__CUDA__))
| include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h",
"repo_id": "include",
"token_count": 2494
} | 32 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
// WarpSpecialized Mainloop
template <
int Stages,
class ClusterShape_,
class TileShape_,
class KernelSchedule,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90CpAsyncGmmaWarpSpecialized<Stages,ClusterShape_,KernelSchedule>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90CpAsyncGmmaWarpSpecialized<Stages,ClusterShape_,KernelSchedule>;
using TileShape = TileShape_;
using ClusterShape = ClusterShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
using MainloopPipeline = cutlass::PipelineAsync<DispatchPolicy::Stages>;
using PipelineState = typename MainloopPipeline::PipelineState;
using PipelineParams = typename MainloopPipeline::Params;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source both A and B operand from smem_desc for this mainloop.");
struct SharedStorage
{
struct TensorStorage : cute::aligned_struct<128> {
cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>> smem_A;
cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>> smem_B;
} tensors;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A = nullptr;
StrideA dA{};
ElementB const* ptr_B = nullptr;
StrideB dB{};
uint32_t mma_promotion_interval = 4;
};
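  // A host-side caller might populate Arguments along these lines (illustrative sketch; the
  // pointer and stride names are placeholders, and the strides are whatever cute stride types
  // StrideA/StrideB were instantiated with; mma_promotion_interval keeps its default):
  //
  //   Arguments mainloop_args{device_ptr_A, stride_A, device_ptr_B, stride_B};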
// Device side kernel params
using Params = Arguments;
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& problem_shape,
Arguments const& args,
[[maybe_unused]] void* workspace) {
return args;
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyA::NumValSrc>(cute::make_shape(M,K,L), StrideA{});
implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyB::NumValSrc>(cute::make_shape(N,K,L), StrideB{});
if (!implementable) {
      CUTLASS_TRACE_HOST("  CAN IMPLEMENT: Problem size doesn't meet the minimum alignment requirements for cp.async.\n");
}
return implementable;
}
static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
static constexpr int K_PIPE_MMAS = 1;
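  // With the constants above, the consumer keeps at most K_PIPE_MMAS (= 1) k-tiles worth of GMMAs
  // in flight before releasing smem buffers back to the producer: the prologue issues that many
  // GMMAs without releasing, and the mainloop calls warpgroup_wait<K_PIPE_MMAS>() before each
  // consumer_release() so the corresponding stage is no longer being read.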
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
template <
class TensorA,
class TensorB,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
load(
MainloopPipeline pipeline,
PipelineState smem_pipe_write,
TensorA const& gA_in,
TensorB const& gB_in,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
TensorStorage& shared_tensors)
{
using namespace cute;
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
Tensor gA = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gA_in);
Tensor gB = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gB_in);
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_a;
GmemTiledCopyB gmem_tiled_copy_b;
auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx);
auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
// 0-th stage with predication on k to account for residue
{
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
int write_stage = smem_pipe_write.index();
// Copy gmem to smem for *k_tile_iter, predicating for k residue
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tAsA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,write_stage));
}
else {
clear(tAsA(_,_,k,write_stage));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBsB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,write_stage));
}
else {
clear(tBsB(_,_,k,write_stage));
}
}
++k_tile_iter;
--k_tile_count;
// UNLOCK smem_pipe_write
pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive);
// Advance smem_pipe_write
++smem_pipe_write;
}
// Mainloop
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
int write_stage = smem_pipe_write.index();
// Copy gmem to smem for *k_tile_iter
copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
++k_tile_iter;
// UNLOCK smem_pipe_write
pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive);
// Advance smem_pipe_write
++smem_pipe_write;
}
}
/// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster
CUTLASS_DEVICE void
load_tail(
MainloopPipeline pipeline,
PipelineState smem_pipe_write) {
// Issue the epilogue waits
    /* This helps avoid early exit of blocks in Cluster.
     * It waits for all stages to either be released (all
     * Consumer UNLOCKs), or, if a stage was never used,
     * to be acquired directly, since its phase is still
     * inverted from make_producer_start_state.
     */
pipeline.producer_tail(smem_pipe_write);
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
template <
class FrgTensorC
>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
PipelineState smem_pipe_read,
FrgTensorC& accum,
int k_tile_count,
int thread_idx,
TensorStorage& shared_tensors,
Params const& mainloop_params)
{
using namespace cute;
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::is_void_v<SmemCopyAtomA>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
static_assert(cute::is_void_v<SmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Define C accumulators and A/B partitioning
//
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCsA = thread_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thread_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
// Allocate "fragments/descriptors"
Tensor tCrA = thread_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(accum)); // M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
//
// PIPELINED MAIN LOOP
//
static_assert((0 <= K_PIPE_MMAS) && (K_PIPE_MMAS < K_PIPE_MAX),
"ERROR : Incorrect number of MMAs in flight");
    // We release buffers to the producer warps (DMA load) with some MMAs still in flight
PipelineState smem_pipe_release = smem_pipe_read;
// Prologue GMMAs
int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count);
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
warpgroup_fence_operand(accum);
CUTLASS_PRAGMA_UNROLL
for (int k_tile_prologue = prologue_mma_count; k_tile_prologue > 0; --k_tile_prologue) {
// WAIT on smem_pipe_read until its data are available (phase bit flips from rdPhaseBit value)
auto barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
pipeline.consumer_wait(smem_pipe_read, barrier_token);
int read_stage = smem_pipe_read.index();
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,read_stage), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
++smem_pipe_read;
}
warpgroup_fence_operand(accum);
// Mainloop GMMAs
k_tile_count -= prologue_mma_count;
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// WAIT on smem_pipe_read until its data are available (phase bit flips from rdPhaseBit value)
auto barrier_token = pipeline.consumer_try_wait(smem_pipe_read);
pipeline.consumer_wait(smem_pipe_read, barrier_token);
int read_stage = smem_pipe_read.index();
warpgroup_fence_operand(accum);
warpgroup_arrive();
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
// (V,M,K) x (V,N,K) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block,read_stage), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
}
warpgroup_commit_batch();
/// Wait on the GMMA barrier for K_PIPE_MMAS (or fewer) outstanding to ensure smem_pipe_write is consumed
warpgroup_wait<K_PIPE_MMAS>();
warpgroup_fence_operand(accum);
// UNLOCK smem_pipe_release, done _computing_ on it
pipeline.consumer_release(smem_pipe_release);
// Advance smem_pipe_read and smem_pipe_release
++smem_pipe_read;
++smem_pipe_release;
}
warpgroup_fence_operand(accum);
}
/// Perform a Consumer Epilogue to release all buffers
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
// Prologue GMMAs
int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count);
k_tile_count -= prologue_mma_count;
smem_pipe_release.advance(k_tile_count);
// Wait on all GMMAs to complete
warpgroup_wait<0>();
for (int count = 0; count < prologue_mma_count; ++count) {
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm90_mma_multistage_gmma_ss_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm90_mma_multistage_gmma_ss_warpspecialized.hpp",
"repo_id": "include",
"token_count": 8398
} | 33 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a sparse GEMM kernel that computes the absolute maximum of the output tensor
and applies additional scaling factors to operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse_with_absmax.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class SparseGemmWithAbsmax {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemmWithAbsmax<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::GemmKernel;
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
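  // The metadata packing is defined by the underlying sparse kernel. Purely as an illustration of
  // the bookkeeping (not a statement about any particular configuration): if ElementE were a
  // 16-bit integer and each metadata entry occupied 2 bits, one ElementE would pack 16/2 = 8 entries.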
using Arguments = typename GemmKernel::Arguments;
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemmWithAbsmax() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
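    // Worked example (illustrative): serial split-K over an 8 x 16 grid of threadblock tiles
    // reserves 8 * 16 * sizeof(int) = 512 bytes of semaphore storage.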
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref(),
args.ref_Aux,
args.ptr_Vector,
args.ldr,
args.epilogue,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_C.reset(args.ref_C.non_const_ref().data());
params_.ref_D.reset(args.ref_D.data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
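//
// Typical host-side call sequence (illustrative sketch; `args`, `stream`, and the device
// workspace allocation are assumed to be provided by the caller):
//
//   using Gemm = SparseGemmWithAbsmax</* concrete template arguments */>;
//   Gemm gemm_op;
//   Status status = Gemm::can_implement(args);
//   size_t workspace_bytes = Gemm::get_workspace_size(args);
//   // ... allocate `workspace_bytes` bytes of device memory as `workspace` ...
//   status = gemm_op(args, workspace, stream);   // initialize() followed by run()
//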
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_sparse_with_absmax.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_sparse_with_absmax.h",
"repo_id": "include",
"token_count": 4371
} | 34 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/arch/arch.h"
#include "cutlass/gemm/gemm.h"
#include "cute/layout.hpp"
#include "cute/numeric/integral_constant.hpp"
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::detail {
template <class T, template <int...> class U>
struct is_kernel_tag_of : cute::false_type {};
template <template <int...> class U, int... Args>
struct is_kernel_tag_of<U<Args...>, U> : cute::true_type {};
template <class T, template <int...> class U>
constexpr bool is_kernel_tag_of_v = is_kernel_tag_of<T, U>::value;
}
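// Example use of the trait above (illustrative only): it reports whether a type is a
// specialization of a class template taking only int parameters, e.g.
//
//   static_assert(cutlass::detail::is_kernel_tag_of_v<
//       cutlass::gemm::MainloopSm80CpAsync<3>, cutlass::gemm::MainloopSm80CpAsync>);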
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm {
using namespace cute;
//////////////////////////////////////////////////////////////////////////////
namespace detail {
enum class KernelInputTransformType {
FastF32,
InterleavedComplexTF32
};
} // namespace detail
//////////////////////////////////////////////////////////////////////////////
//
// Kernel schedule policies (the base class tags, one for each kernel layer file)
//
struct KernelMultistage { };
struct KernelCpAsyncWarpSpecialized { };
struct KernelCpAsyncWarpSpecializedPingpong { };
struct KernelCpAsyncWarpSpecializedCooperative { };
struct KernelTma { };
struct KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperative { };
struct KernelPtrArrayTmaWarpSpecializedCooperative { };
//////////////////////////////////////////////////////////////////////////////
//
// Builder dispatch policies (not a part of the main CUTLASS layers, simply used to opt into
// specific collective builder dispatches)
//
// FP8 related policies (including Fast Accumulation)
struct KernelTmaWarpSpecializedFP8FastAccum : KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpongFP8FastAccum : KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperativeFP8FastAccum: KernelTmaWarpSpecializedCooperative { };
struct KernelPtrArrayTmaWarpSpecializedCooperativeFP8FastAccum : KernelPtrArrayTmaWarpSpecializedCooperative { };
// Policies to opt into mixed type GEMMs
struct KernelTmaWarpSpecializedMixedInput : KernelTmaWarpSpecialized { };
struct KernelTmaWarpSpecializedPingpongMixedInput : KernelTmaWarpSpecializedPingpong { };
struct KernelTmaWarpSpecializedCooperativeMixedInput: KernelTmaWarpSpecializedCooperative { };
//////////////////////////////////////////////////////////////////////////////
// Policies for dispatch of epilogue
struct EpilogueDefault { };
struct EpilogueTransposed { };
//////////////////////////////////////////////////////////////////////////////
//
// Collective Mainloop Policies
//
// 2 stage pipeline through 1 stage in smem, 1 in rmem, WITHOUT predicated gmem loads
struct MainloopSm70TwoStageUnpredicated {
constexpr static int Stages = 2;
using ArchTag = arch::Sm70;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
// 2 stage pipeline through 1 stage in smem, 1 in rmem, with predicated gmem loads
struct MainloopSm70TwoStage {
constexpr static int Stages = 2;
using ArchTag = arch::Sm70;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
// n-buffer in smem (cp.async), pipelined with registers, WITHOUT predicated gmem loads
template<int Stages_>
struct MainloopSm80CpAsyncUnpredicated {
constexpr static int Stages = Stages_;
using ArchTag = arch::Sm80;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
// n-buffer in smem (cp.async), pipelined with registers, with predicated gmem loads
template<int Stages_>
struct MainloopSm80CpAsync {
constexpr static int Stages = Stages_;
using ArchTag = arch::Sm80;
using Schedule = KernelMultistage;
using ClusterShape = Shape<_1,_1,_1>;
};
// n-buffer in smem (cp.async), pipelined with Hopper GMMA, with predicated gmem loads, warp specialized dynamic schedule
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelCpAsyncWarpSpecialized
>
struct MainloopSm90CpAsyncGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
// n-buffer in smem (cp.async), pipelined with Hopper GMMA, with predicated gmem loads, warp specialized dynamic schedule
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelCpAsyncWarpSpecialized
>
struct MainloopSm90CpAsyncGmmaRmemAWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, static schedule between TMA and GMMA
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
int PipelineAsyncMmaStages_ = 1
>
struct MainloopSm90TmaGmma {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
constexpr static int PipelineAsyncMmaStages = PipelineAsyncMmaStages_;
using ArchTag = arch::Sm90;
using Schedule = KernelTma;
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecializedCooperative
>
struct MainloopSm90TmaGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
};
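// For illustration only, a collective mainloop might be parameterized with the policy above as
// follows (the stage count and cluster shape are arbitrary example values):
//
//   using ExamplePolicy = MainloopSm90TmaGmmaWarpSpecialized<
//       /* Stages   */ 4,
//       /* Cluster  */ Shape<_2,_1,_1>,
//       /* Schedule */ KernelTmaWarpSpecializedCooperative>;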
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule
// With GMMA's A data from registers.
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecialized
>
struct MainloopSm90TmaGmmaRmemAWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
static_assert(
cute::is_same_v<Schedule, KernelTmaWarpSpecialized> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpong> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperative>,
"KernelSchedule must be one of the warp specialized policies");
};
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecialized
>
struct MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
static_assert(
cute::is_same_v<Schedule, KernelTmaWarpSpecialized> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedMixedInput> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpong> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedPingpongMixedInput> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperative> ||
cute::is_same_v<Schedule, KernelTmaWarpSpecializedCooperativeMixedInput>,
"KernelSchedule must be one of the warp specialized policies");
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule
// For FP8 kernels
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelTmaWarpSpecialized
>
struct MainloopSm90TmaGmmaWarpSpecializedFP8
: MainloopSm90TmaGmmaWarpSpecialized<Stages_, ClusterShape_, KernelSchedule> {
static_assert(
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecialized> ||
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedPingpong> ||
cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedCooperative>,
"KernelSchedule must be one of the warp specialized policies");
};
// n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, Warp specialized dynamic schedule for Ptr-Array and Grouped Gemm
template<
int Stages_,
class ClusterShape_ = Shape<_1,_1,_1>,
class KernelSchedule = KernelPtrArrayTmaWarpSpecializedCooperative
>
struct MainloopSm90ArrayTmaGmmaWarpSpecialized {
constexpr static int Stages = Stages_;
using ClusterShape = ClusterShape_;
using ArchTag = arch::Sm90;
using Schedule = KernelSchedule;
static_assert(
cute::is_base_of_v<KernelPtrArrayTmaWarpSpecializedCooperative, KernelSchedule>,
"KernelSchedule must be one of the Ptr-Array or Grouped Gemm TMA Warp Specialized Cooperative policies");
};
//////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm
| include/cutlass/gemm/dispatch_policy.hpp/0 | {
"file_path": "include/cutlass/gemm/dispatch_policy.hpp",
"repo_id": "include",
"token_count": 3140
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/symm_universal.h"
#include "cutlass/gemm/kernel/default_symm.h"
#include "cutlass/gemm/kernel/default_symm_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
  /// Operation performed by SYMM/HEMM
typename Operator,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric,
///
typename Enable = void
>
struct DefaultSymmUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued SYMM/HEMM update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYMM/HEMM
typename Operator>
struct DefaultSymmUniversal<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultSymmkernel = typename kernel::DefaultSymm<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric
>::SymmKernel;
/// Define the kernel in terms of the default kernel
using SymmKernel = kernel::SymmUniversal<
typename DefaultSymmkernel::Mma1,
typename DefaultSymmkernel::Mma2,
typename DefaultSymmkernel::Epilogue,
ThreadblockSwizzle,
SideModeA,
FillModeA
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued SYMM/HEMM update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
  /// Operation performed by SYMM/HEMM
typename Operator,
  /// Blas3 computation mode (symmetric/hermitian)
BlasMode kBlasMode
>
struct DefaultSymmUniversal<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
kBlasMode,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultSymmkernel = typename kernel::DefaultSymmComplex<
ElementA,
LayoutA,
SideModeA,
FillModeA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
SplitKSerial,
kBlasMode
>::SymmKernel;
/// Define the kernel in terms of the default kernel
using SymmKernel = kernel::SymmUniversal<
typename DefaultSymmkernel::Mma1,
typename DefaultSymmkernel::Mma2,
typename DefaultSymmkernel::Epilogue,
ThreadblockSwizzle,
SideModeA,
FillModeA
>;
};
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_symm_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_symm_universal.h",
"repo_id": "include",
"token_count": 3292
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for GEMM performing a reduction over K partitions in parallel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmSplitKParallel {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
static int const kAlignmentK = Mma::Operator::Shape::kK;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int64_t splitk_slice_stride;
int gemm_k_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename OutputOp::Params output_op,
int64_t splitk_slice_stride
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
splitk_slice_stride(splitk_slice_stride) {
int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK;
int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
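      // Illustration with hypothetical sizes: for problem_size.k() = 1000, Mma::Shape::kK = 32 and
      // grid_tiled_shape.k() = 4 splits, full_gemm_k_iterations = 31, gemm_k_iterations = 7 and
      // gemm_k_size = 224, so split k begins its K range at offset k * 224.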
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmSplitKParallel() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k;
if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) {
problem_size_k = params.problem_size.k();
}
else {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
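    // For example (hypothetical sizes: K = 1000, Mma::Shape::kK = 32, gemm_k_size = 224, 4 splits):
    // splits 0-2 see problem_size_k = 224, 448 and 672 with 7 iterations each, while the last
    // split sees the full K = 1000 and runs ceil((1000 - 672) / 32) = 11 iterations, absorbing
    // the remainder.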
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k());
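    // Each K-partition writes its partial result to a distinct slice of the output workspace
    // (offset by splitk_slice_stride per slice); a separate reduction kernel is expected to sum
    // these slices into the final output afterwards.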
// Execute the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Run efficient epilogue
epilogue(output_op, iterator_D, accumulators, iterator_D);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/gemm_splitk_parallel.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_splitk_parallel.h",
"repo_id": "include",
"token_count": 2945
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Grouped Rank2K kernel.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T)
typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
ComplexTransform OriginalTransformA_, ///! Public-facing transformation on A
ComplexTransform OriginalTransformB_, ///! Public-facing transformation on B
FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper)
BlasMode BlasMode_, ///! Blas3 computation mode
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false
>
struct Rank2KGrouped {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
static_assert(platform::is_same<typename Mma1::LayoutC, cutlass::layout::RowMajor>::value &&
platform::is_same<typename Mma2::LayoutC, cutlass::layout::RowMajor>::value,
"Kernel-level grouped Rank2K requires that LayoutC be row major.");
  // Define a generic Mma for use cases that rely on Kernel::Mma
using Mma = Mma1_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion to reflect the original layout,
// fill mode, etc. passed in.
//
// Recall that a Rank2K operation performs (A x BT) + (B x AT)
// This is performed via:
// Mma1 = (A x BT)
// Mma2 = (B x AT)
//
// However, if C needs to be transposed, then this is changed to the following:
// Mma1 = (B x AT)
// Mma2 = (A x BT)
//
// The transformation above is achieved by swapping the Layouts/Elements/Transforms/etc.
// of A and B as they are passed into the instantiations of Mma1 and Mma2.
//
// Now, given access to only Mma1 and Mma2, as well as whether a transposition has occurred,
// we wish to retrieve the original Layouts/Elements/etc. for A and B that were passed into
// the device-level call.
//
// The logic to do this (which is made clearer by referencing the above instantiations) is as follows:
// LayoutA = kTransposed ? Mma2::LayoutA : Mma1::LayoutA
// LayoutB = kTransposed ? Mma1::LayoutA : Mma2::LayoutA
//
// We achieve this swapping by passing Mma1::*A and Mma2::*B to Rank2KMapArguments:
using MapArgumentsA = kernel::detail::Rank2KMapArguments<
typename Mma1::IteratorA::Element,
typename Mma1::IteratorA::Layout,
Mma1::kTransformA,
Mma1::IteratorA::AccessType::kElements,
typename Mma2::IteratorA::Element,
typename Mma2::IteratorA::Layout,
Mma2::kTransformA,
Mma2::IteratorA::AccessType::kElements,
typename Mma1::LayoutC,
FillModeC_,
kTransposed
>;
using ElementA = typename MapArgumentsA::ElementA;
using LayoutA = typename MapArgumentsA::LayoutA;
static int const kAlignmentA = MapArgumentsA::kAlignmentA;
using MapArgumentsB = kernel::detail::Rank2KMapArguments<
typename Mma2::IteratorA::Element,
typename Mma2::IteratorA::Layout,
Mma2::kTransformA,
Mma2::IteratorA::AccessType::kElements,
typename Mma1::IteratorA::Element,
typename Mma1::IteratorA::Layout,
Mma1::kTransformA,
Mma1::IteratorA::AccessType::kElements,
typename Mma2::LayoutC,
FillModeC_,
kTransposed
>;
using ElementB = typename MapArgumentsB::ElementA;
using LayoutB = typename MapArgumentsB::LayoutA;
static int const kAlignmentB = MapArgumentsB::kAlignmentA;
// Use the user-provided TransformA and TransformB, rather than those
// resulting from MapArguments, because Mma1 and Mma2 may have different
// complex transforms than those passed in by the user.
// (See kernel/rank_2k_complex.h for an example of this)
static cutlass::ComplexTransform const kTransformA = OriginalTransformA_;
static cutlass::ComplexTransform const kTransformB = OriginalTransformB_;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArgumentsA::LayoutC;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static FillMode const kFillModeC = MapArgumentsA::kFillModeC;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static BlasMode const kBlasMode = BlasMode_;
private:
static FillMode const kInternalFillModeC = FillModeC_;
public:
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = Rank2KGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kInternalFillModeC>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode = GemmUniversalMode::kGemm;
GemmCoord *problem_sizes = nullptr;
int problem_count{0};
int threadblock_count{0};
typename EpilogueOutputOp::Params epilogue;
ElementA ** ptr_A = nullptr;
ElementB ** ptr_B = nullptr;
ElementC ** ptr_C = nullptr;
ElementC ** ptr_D = nullptr;
typename LayoutA::Stride::LongIndex *lda = nullptr;
typename LayoutB::Stride::LongIndex *ldb = nullptr;
typename LayoutC::Stride::LongIndex *ldc = nullptr;
typename LayoutC::Stride::LongIndex *ldd = nullptr;
// Only used by device-level operator
GemmCoord *host_problem_sizes = nullptr;
bool allow_early_exit = false;
//
// Methods
//
/// Default ctor
Arguments() = default;
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmUniversalMode mode,
GemmCoord *problem_sizes,
int problem_count,
int threadblock_count,
typename EpilogueOutputOp::Params epilogue,
ElementA ** ptr_A,
ElementB ** ptr_B,
ElementC ** ptr_C,
ElementC ** ptr_D,
typename LayoutA::Stride::LongIndex *lda,
typename LayoutB::Stride::LongIndex *ldb,
typename LayoutC::Stride::LongIndex *ldc,
typename LayoutC::Stride::LongIndex *ldd,
GemmCoord *host_problem_sizes=nullptr,
bool allow_early_exit=false
):
mode(mode),
problem_sizes(problem_sizes),
problem_count(problem_count),
threadblock_count(threadblock_count),
epilogue(epilogue),
ptr_A(ptr_A),
ptr_B(ptr_B),
ptr_C(ptr_C),
ptr_D(ptr_D),
lda(lda),
ldb(ldb),
ldc(ldc),
ldd(ldd),
host_problem_sizes(host_problem_sizes),
allow_early_exit(allow_early_exit)
{
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor{};
int threadblock_count = 0;
typename EpilogueOutputOp::Params output_op{};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count = 0;
ElementA** ptr_A = nullptr;
ElementB** ptr_B = nullptr;
ElementC** ptr_C = nullptr;
ElementC** ptr_D = nullptr;
typename LayoutA::Stride::LongIndex* lda = nullptr;
typename LayoutB::Stride::LongIndex* ldb = nullptr;
typename LayoutC::Stride::LongIndex* ldc = nullptr;
typename LayoutC::Stride::LongIndex* ldd = nullptr;
bool allow_early_exit = false;
//
// Methods
//
Params() = default;
CUTLASS_HOST_DEVICE
Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0):
problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
output_op(args.epilogue),
ptr_A(args.ptr_A),
ptr_B(args.ptr_B),
ptr_C(args.ptr_C),
ptr_D(args.ptr_D),
lda(args.lda),
ldb(args.ldb),
ldc(args.ldc),
ldd(args.ldd),
allow_early_exit(args.allow_early_exit)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count);
threadblock_count = args.threadblock_count;
output_op = args.output_op;
ptr_A = args.ptr_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
}
};
/// Shared memory storage structure
struct SharedStorage {
union {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
} kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
Rank2KGrouped() = default;
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Early exit following LAPACK's definition
if (params.allow_early_exit &&
(params.output_op.alpha == ElementC(0)) && (params.output_op.beta == ElementC(1))) {
return;
}
//
// Problem visitor.
//
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
cutlass::gemm::GemmCoord threadblock_tile_offset = problem_visitor.threadblock_offset(threadblock_idx);
//
// Perform checks to determine whether the results of this threadblock will be needed.
// An example of an unneeded threadblock is one that is assigned to compute in the upper
// portion of a Rank2K kernel filled with mode kLower.
//
// TODO: Consider pushing these checks into ProblemVisitor to avoid spuriously
// returning from `next_tile()`.
//
// Early exit if threadblock is out of range
if (grid_shape.m() <= threadblock_tile_offset.m() ||
grid_shape.n() <= threadblock_tile_offset.n()) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
// Skip this tile if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kInternalFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
// Skip this tile if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kInternalFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN
&& threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
tile_on_diagonal = true;
}
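      // As an illustration (hypothetical 128x128 threadblock tiles): the tile at (m, n) = (1, 3)
      // spans rows [128, 256) and columns [384, 512), so for FillMode::kLower it lies entirely
      // above the diagonal and is skipped, whereas the tile at (2, 2) spans rows and columns
      // [256, 384) and is marked as crossing the diagonal.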
int offset_k = 0;
int problem_size_k = problem_size.k();
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < grid_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * problem_size.k();
}
offset_k = threadblock_tile_offset.k() * problem_size.k();
}
ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
typename LayoutA::Stride::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
typename LayoutB::Stride::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Assume identity swizzle
MatrixCoord tb_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A(
Mma1::IteratorA::Params(ldm_A),
ptr_A,
{problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma1::IteratorB iterator_BT(
Mma1::IteratorB::Params(ldm_B),
ptr_B,
{problem_size_k, problem_size.n()},
thread_idx,
tb_offset_KxN);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_B(
Mma2::IteratorA::Params(ldm_B),
ptr_B,
{problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma2::IteratorB iterator_AT(
Mma2::IteratorB::Params(ldm_A),
ptr_A,
{problem_size_k, problem_size.n()},
thread_idx,
tb_offset_KxN);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1 (A x BT)
Mma1 mma1(shared_storage.kernel.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2 (B x AT)
Mma2 mma2(shared_storage.kernel.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
// Compute threadblock-scoped matrix multiply-add (A x BT)
mma1(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_BT,
accumulators);
      // The HER2K kernel needs Alpha to be complex, and conj(Alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]);
// If TB not on diagonal, FillMode doesn't apply.
FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]),
ptr_C,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]),
ptr_D,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
__syncthreads();
accumulators.clear();
}
// Compute threadblock-scoped matrix multiply-add (B x AT)
mma2(
gemm_k_iterations,
accumulators,
iterator_B,
iterator_AT,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
/* Needed for HER2K where the second HERK is multiplied by conj(alpha) */
typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1);
EpilogueOutputOp output_op_her2k(second_her2k_params);
//
// Masked tile iterators constructed from members
//
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]);
      // The HER2K kernel needs Alpha to be complex, and conj(Alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
ptr_C = static_cast<ElementC *>(params.ptr_D[problem_idx]);
}
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]);
// If TB not on diagonal, FillMode doesn't apply.
FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]),
ptr_C,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]),
ptr_D,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
if (kBlasMode == BlasMode::kSymmetric) {
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
} else {
epilogue(
output_op_her2k,
iterator_D,
accumulators,
iterator_C);
}
// Next tile
problem_visitor.advance(gridDim.x);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/rank_2k_grouped.h/0 | {
"file_path": "include/cutlass/gemm/kernel/rank_2k_grouped.h",
"repo_id": "include",
"token_count": 9104
} | 38 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/barrier.h"
#include "cutlass/block_striped.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
namespace cutlass::gemm::kernel::detail {
// Persistent Thread Block (TB) scheduler leveraging stream-K decomposition
template <
class TileShape,
class ClusterShape
>
class PersistentTileSchedulerSm90StreamK {
//
// Data members
//
private:
using UnderlyingScheduler = PersistentTileSchedulerSm90;
private:
using UnderlyingArguments = typename UnderlyingScheduler::Arguments;
using UnderlyingParams = typename UnderlyingScheduler::Params;
uint64_t current_work_linear_idx_ = 0;
public:
using RasterOrder = UnderlyingScheduler::RasterOrder;
using RasterOrderOptions = UnderlyingScheduler::RasterOrderOptions;
// Use a dummy barrier manager to simply get the type used to store the barrier
using BarrierType = typename NamedBarrierManager<1>::T;
using Params = PersistentTileSchedulerSm90StreamKParams;
using ReductionMode = Params::ReductionMode;
using DecompositionMode = Params::DecompositionMode;
struct WorkTileInfo {
int32_t M_idx = 0;
int32_t N_idx = 0;
int32_t K_idx = 0;
int32_t L_idx = 0;
// Number of k tiles to compute for this unit of work. For stream-K, this
// can indicate the number of K tiles across multiple output tiles.
uint32_t k_tile_count = 0;
// Number of k tiles remaining for the work unit as a whole
uint32_t k_tile_remaining = 0;
// Whether this unit of work is the final split for the given tile
bool is_separate_reduction = false;
CUTLASS_HOST_DEVICE
bool
is_valid() const {
// A work tile that computes no K tiles is invalid unless it is a separate-reduction work tile
// (which only performs reduction and epilogue)
return k_tile_count > 0 || is_separate_reduction;
}
CUTLASS_HOST_DEVICE
bool
is_reduction_unit() const {
return is_separate_reduction;
}
CUTLASS_HOST_DEVICE
int32_t
reduction_subtile_idx() const {
// For separate reduction units, the K_idx of the work tile is unused.
      // Therefore, we override it to contain the subtile that the reduction
      // unit operates on.
return is_reduction_unit() ? K_idx : -1;
}
CUTLASS_HOST_DEVICE
void
setup_separate_reduction(int32_t epilogue_subtile_idx) {
// Set the epilogue subtile in the K_idx, since this is otherwise unused
// by separate reduction units.
K_idx = epilogue_subtile_idx;
is_separate_reduction = true;
k_tile_count = 0;
// Clean up remaining k tiles
k_tile_remaining = 0;
}
CUTLASS_HOST_DEVICE
static WorkTileInfo
invalid_work_tile() {
return {-1, -1, -1, -1, 0};
}
CUTLASS_HOST_DEVICE
bool
is_final_split(uint32_t k_tiles_per_output_tile) const {
return (K_idx + k_tile_count) == k_tiles_per_output_tile;
}
};
struct Arguments {
Arguments() = default;
Arguments(Arguments const&) = default;
Arguments(Arguments&&) = default;
CUTLASS_HOST_DEVICE
Arguments&
operator=(Arguments const& args) {
splits = args.splits;
max_swizzle_size = args.max_swizzle_size;
raster_order = args.raster_order;
reduction_mode = args.reduction_mode;
decomposition_mode = args.decomposition_mode;
return *this;
}
CUTLASS_HOST_DEVICE
Arguments&
operator=(Arguments&& args) noexcept {
splits = args.splits;
max_swizzle_size = args.max_swizzle_size;
raster_order = args.raster_order;
reduction_mode = args.reduction_mode;
decomposition_mode = args.decomposition_mode;
return *this;
}
CUTLASS_HOST_DEVICE
Arguments(int splits_) : splits(splits_) {}
CUTLASS_HOST_DEVICE
Arguments(int splits_, int max_swizzle_size_, RasterOrderOptions raster_order_, DecompositionMode decomposition_mode_) :
splits(splits_),
max_swizzle_size(max_swizzle_size_),
raster_order(raster_order_),
decomposition_mode(decomposition_mode_) {}
// The splitting factor to be used in a split-K decomposition of the problem.
// If this is set to a value greater than 1, stream-K decomposition logic
// is bypassed in favor of a split-K decomposition.
int splits = 1;
int max_swizzle_size = 1;
RasterOrderOptions raster_order = RasterOrderOptions::Heuristic;
ReductionMode reduction_mode = ReductionMode::Deterministic;
DecompositionMode decomposition_mode = DecompositionMode::Heuristic;
};
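  // Example (for illustration only): requesting a split-K decomposition with two splits could be
  // expressed as
  //   Arguments args(/*splits_=*/2, /*max_swizzle_size_=*/1,
  //                  RasterOrderOptions::Heuristic, DecompositionMode::SplitK);
  // while a default-constructed Arguments{} leaves the decomposition to the heuristic.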
// Sink scheduler params as a member
Params scheduler_params;
//
// Methods
//
template <class ProblemShape>
static Params
to_underlying_arguments(
ProblemShape problem_shape,
TileShape tile_shape,
ClusterShape cluster_shape,
KernelHardwareInfo const& hw_info,
Arguments const& args,
void* workspace,
const uint32_t epilogue_subtile = 1) {
static_assert(cute::is_static<TileShape>::value);
static_assert(cute::is_static<ClusterShape>::value);
auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{});
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape);
uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{})));
Params params;
params.initialize(
problem_blocks,
k_tile_per_output_tile,
to_gemm_coord(cluster_shape),
hw_info,
args.splits,
args.max_swizzle_size,
args.raster_order,
args.reduction_mode,
args.decomposition_mode,
workspace,
epilogue_subtile
);
return params;
}
CUTLASS_HOST_DEVICE
static bool
can_implement(Arguments const& args) {
// Split count > 1 is only valid for heuristic and split-K decomposition modes
return (args.splits == 1 ||
args.decomposition_mode == DecompositionMode::Heuristic ||
args.decomposition_mode == DecompositionMode::SplitK);
}
CUTLASS_HOST_DEVICE
PersistentTileSchedulerSm90StreamK() { };
CUTLASS_HOST_DEVICE
PersistentTileSchedulerSm90StreamK(Params const& params_) : scheduler_params(params_) {
if (params_.raster_order_ == RasterOrder::AlongN) {
current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x);
}
else {
current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y);
}
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work() const {
return get_current_work_for_linear_idx(current_work_linear_idx_, scheduler_params);
}
CUTLASS_DEVICE
static WorkTileInfo
get_current_work_for_linear_idx(uint64_t linear_idx, Params const& params) {
// The maximum number of work units is units_per_problem_ * splits_.
// The multiplication by splits_ is used for handling split-K, in which
// units_per_problem_ is equal to the total number of output tiles. To account
// for the fact that we have splits_ peers per output tile, we multiply this
// value by splits_. For stream-K, this multiplication ends up being a no-op
// because splits_ is set to 1 for stream-K.
if(linear_idx >= (params.units_per_problem_ * params.splits_ + params.separate_reduction_units_)) {
// Invalid work. Return an empty result.
return WorkTileInfo::invalid_work_tile();
}
WorkTileInfo work_tile_info;
assign_work(params, linear_idx, work_tile_info);
return work_tile_info;
}
// Returns whether the current work_tile_info passed in should continue to be used. This
// occurs only in the stream-K decomposition with stream-K work units, which encompass
// work over multiple output tiles. If the current work_tile_info should continue to be
// used, it is updated to advance to the next output tile it should cover.
CUTLASS_DEVICE
bool
continue_current_work(WorkTileInfo& work_tile_info) const {
return continue_current_work_for_linear_idx(
current_work_linear_idx_, work_tile_info, scheduler_params);
}
CUTLASS_DEVICE
static bool
continue_current_work_for_linear_idx(
uint64_t linear_idx,
WorkTileInfo& work_tile_info,
Params const& params) {
work_tile_info.k_tile_remaining -= work_tile_info.k_tile_count;
if (work_tile_info.k_tile_remaining == 0) {
return false;
}
assign_work(params, linear_idx, work_tile_info);
return work_tile_info.is_valid();
}
CUTLASS_DEVICE
void
advance_to_next_work(uint32_t advance_count = 1) {
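    // Persistent CTAs stride through the linearized space of work units by the total number of
    // CTAs in the launched grid, so each call advances this CTA to its next disjoint unit.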
current_work_linear_idx_ += uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z) * uint64_t(advance_count);
}
// Given the inputs, computes the total number of output blocks this problem will compute over
// Note that this is only the logical size of our grid, not the physical grid we will actually launch.
template <class ProblemShape>
CUTLASS_HOST_DEVICE static
dim3
get_tiled_cta_shape_mnl(ProblemShape problem_shape_mnkl, TileShape cta_shape, ClusterShape cluster_shape) {
return UnderlyingScheduler::get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape);
}
// Given the cluster shape, computes the physical grid we should launch.
template <class ProblemShape>
CUTLASS_HOST_DEVICE static
dim3
get_grid_shape(
ProblemShape problem_shape,
TileShape tile_shape,
ClusterShape cluster_shape,
KernelHardwareInfo hw_info,
Arguments arguments) {
auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{});
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape);
return Params::get_grid_shape(
problem_blocks,
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order
);
}
// Returns whether fixup is needed for `work_tile_info`.
CUTLASS_HOST_DEVICE
static bool
requires_fixup(Params const& params, WorkTileInfo const& work_tile_info) {
// Fixup is not needed for invalid or data-parallel tiles
return work_tile_info.is_valid() && work_tile_info.k_tile_count != params.divmod_tiles_per_output_tile_.divisor;
}
CUTLASS_HOST_DEVICE
static bool
requires_separate_reduction(Params const& params) {
return params.requires_separate_reduction();
}
  // A work tile that is not a separate-reduction unit is valid for all warpgroups. Otherwise, the
  // producer warpgroup must skip its global loads and the consumer warpgroup must skip its math
  // computation.
CUTLASS_DEVICE
static bool
valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) {
return !work_tile_info.is_reduction_unit();
}
// Performs the reduction across splits for a given output tile.
template <class FrgTensorC>
CUTLASS_DEVICE
static void
fixup(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
static constexpr uint32_t Offset = static_cast<int>(cutlass::arch::ReservedNamedBarriers::StreamkBarrier0);
static constexpr uint32_t MaxNumNamedBarriers = 2;
using BarrierManager = NamedBarrierManager<NumThreadsPerWarpGroup, Offset, MaxNumNamedBarriers>;
return fixup_helper<FrgTensorC, BarrierManager>(
params, work_tile_info, accumulators, num_barriers, barrier_idx);
}
// Helper for performing the reduction across splits for a given output tile.
template <class FrgTensorC, class BarrierManager>
CUTLASS_DEVICE
static void
fixup_helper(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx,
uint32_t num_accumulator_mtxs = 1) {
using ElementAccumulator = typename FrgTensorC::value_type;
if (!requires_fixup(params, work_tile_info)) {
return;
}
auto tile_idx = output_tile_index(params, work_tile_info);
// Index of the lock on which to wait
auto lock_idx = (tile_idx * num_barriers) + barrier_idx;
auto reduction_tile_idx = tile_idx;
auto [first_peer_id, my_peer_id, last_peer_id] = tile_peer_range(params, tile_idx, static_cast<uint32_t>(work_tile_info.K_idx));
auto reduction_peer_offset = 0;
if (params.requires_separate_reduction()) {
// If separate reduction is to be performed, each stream-K unit writes its partials
// to a separate portion of the workspace. There are as many of these portions as there
// are peers for a given output tile, so we multiply the tile index by the maximum peer count.
reduction_tile_idx *= Params::max_peers_per_tile(params.sk_units_, params.sk_tiles_);
reduction_peer_offset = my_peer_id * cute::size<0>(TileShape{}) * cute::size<1>(TileShape{});
}
// Reductions use BlockStripedReduce with a width of BarrierManager::ThreadCount under the hood.
// Thus, the start of the reduction space is the same across all threads in a warp group.
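    // The offset below selects, in order: the workspace slab for this output tile (TileM * TileN
    // elements per accumulator matrix), the peer slot within that slab when separate reduction is
    // in use, and (roughly) the block reserved for this barrier index, which holds
    // size(accumulators) elements per participating thread.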
int reduction_offset =
(cute::size<0>(TileShape{}) * cute::size<1>(TileShape{}) * reduction_tile_idx * num_accumulator_mtxs) +
reduction_peer_offset +
(size(accumulators) * barrier_idx * BarrierManager::ThreadCount);
ElementAccumulator* group_reduction_workspace = reinterpret_cast<ElementAccumulator*>(params.reduction_workspace_) + reduction_offset;
using AccumulatorArrayT = Array<typename FrgTensorC::value_type, size(FrgTensorC{})>;
using BlockStripedReduceT = BlockStripedReduce<BarrierManager::ThreadCount, AccumulatorArrayT>;
AccumulatorArrayT* reduction_workspace_array = reinterpret_cast<AccumulatorArrayT*>(group_reduction_workspace);
AccumulatorArrayT* accumulator_array = reinterpret_cast<AccumulatorArrayT*>(&accumulators);
int barrier_group_thread_idx = threadIdx.x % BarrierManager::ThreadCount;
// The number of tiles for which reduction is required is either:
// (a) the total number of output tiles (in the case of split-K)
// (b) the number of stream-K tiles (potentially multiplied by peer count if using separate reduction)
// To calculate the total number of output tiles in the split-K case, we
// note that, in the split-K case, the units_per_problem_ member of Params will be
// the total number of output tiles.
uint32_t reduction_tiles = 0;
if (params.splits_ > 1) {
reduction_tiles = params.units_per_problem_;
}
else if (params.requires_separate_reduction()) {
reduction_tiles = params.sk_tiles_ * Params::max_peers_per_tile(params.sk_units_, params.sk_tiles_);
}
else {
reduction_tiles = params.sk_tiles_;
}
auto reduction_workspace_size = Params::get_reduction_workspace_size(
reduction_tiles, to_gemm_coord(TileShape{}), sizeof_bits<ElementAccumulator>::value, num_accumulator_mtxs);
BarrierType* lock_workspace = reinterpret_cast<BarrierType*>(
reinterpret_cast<uint8_t*>(params.reduction_workspace_) + reduction_workspace_size);
if (work_tile_info.is_reduction_unit()) {
plus<AccumulatorArrayT> add_fragments;
auto peer_offset = size(accumulators) * num_barriers * BarrierManager::ThreadCount;
// Wait until the peers collaborating on this output tile have all written
// their accumulators to workspace.
uint32_t num_peers = last_peer_id - first_peer_id + 1;
BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, num_peers);
// Load the first peer's data
BlockStripedReduceT::load(*accumulator_array, reduction_workspace_array, barrier_group_thread_idx);
for (int i = 1; i < num_peers; ++i) {
// Load peer fragment
AccumulatorArrayT addend_fragment;
auto peer_reduction_workspace = reinterpret_cast<AccumulatorArrayT*>(group_reduction_workspace + (i * peer_offset));
BlockStripedReduceT::load(addend_fragment, peer_reduction_workspace, barrier_group_thread_idx);
// Add peer fragment
*accumulator_array = add_fragments(*accumulator_array, addend_fragment);
}
}
else if (!compute_epilogue(work_tile_info, params)) {
if (params.requires_separate_reduction() || work_tile_info.K_idx == 0) {
// The first peer initializes the workspace partials in the non-separate-reduction case,
// and all peers write to their own location in workspace when using separate reduction
BlockStripedReduceT::store(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx);
}
else {
// Wait until the preceding split added its accumulators
BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx);
// Perform reduction in workspace
BlockStripedReduceT::reduce(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx);
}
// If separate reduction is being performed, each participating stream-K unit increments the barrier
// by only 1. Otherwise, increment by the K tile count that this unit has processed.
int32_t increment = params.requires_separate_reduction() ? 1 : work_tile_info.k_tile_count;
// Signal our arrival
BarrierManager::arrive_inc(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, increment);
}
else {
if (params.reduction_mode_ == ReductionMode::Deterministic) {
// Wait until the preceding split added its accumulators
BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx);
}
else {
      // Wait until the first split has stored its accumulators
BarrierManager::wait_lt(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, 1);
}
// The block computing the final split for the tile adds previously-reduced partials
// to its accumulators and computes the epilogue.
BlockStripedReduceT::load_add(*accumulator_array, reduction_workspace_array, barrier_group_thread_idx);
}
}
// Returns whether the block assigned this work should compute the epilogue for the corresponding
// output tile. For the case of stream-K, this should only occur if the work is marked as the final split.
CUTLASS_HOST_DEVICE
static bool
compute_epilogue(WorkTileInfo const& work_tile_info, Params const& params) {
    // The epilogue must be computed in the following scenarios (the first two of which are
    // captured by `is_final_split` returning `true`):
// 1. The tile is computed in data-parallel mode
// 2. The tile is computed in split-/stream-K mode and this work unit represents the final split of the tile
// 3. The tile is computed in split-/stream-K mode and separate reduction is used, and this is a separate reduction unit
return work_tile_info.is_valid() &&
(work_tile_info.is_final_split(params.divmod_tiles_per_output_tile_.divisor) &&
!params.requires_separate_reduction()) || work_tile_info.is_separate_reduction;
}
// Returns the linearized index of the output tile corresponding to the tile with offset [L, M, K]
CUTLASS_DEVICE
static int
output_tile_index(Params const& params, WorkTileInfo const& work_tile_info) {
uint64_t linear_idx_in_batch = UnderlyingScheduler::get_linear_idx_from_m_and_n(
work_tile_info.M_idx, work_tile_info.N_idx,
params.divmod_cluster_shape_major_,
params.divmod_cluster_shape_minor_,
params.divmod_cluster_blk_major_,
params.log_swizzle_size_,
params.raster_order_
);
uint64_t tiles_mn = params.divmod_batch_.divisor;
return tiles_mn * work_tile_info.L_idx + linear_idx_in_batch;
}
template <class ProblemShape, class ElementAccumulator>
static size_t
get_workspace_size(
Arguments const& args,
ProblemShape problem_shape,
KernelHardwareInfo const& hw_info,
uint32_t mma_warp_groups,
const uint32_t epilogue_subtile = 1) {
auto problem_shape_mnkl = cute::append<4>(problem_shape, 1);
ClusterShape cluster_shape;
TileShape tile_shape;
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape);
uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{})));
return Params::get_workspace_size(
problem_blocks,
k_tile_per_output_tile,
to_gemm_coord(tile_shape),
to_gemm_coord(cluster_shape),
hw_info,
args.splits,
args.max_swizzle_size,
args.raster_order,
args.decomposition_mode,
mma_warp_groups,
sizeof_bits<BarrierType>::value,
sizeof_bits<ElementAccumulator>::value,
epilogue_subtile
);
}
template <class ProblemShape, class ElementAccumulator>
static cutlass::Status
initialize_workspace(
Arguments const& args,
void* workspace,
cudaStream_t stream,
ProblemShape const& problem_shape,
KernelHardwareInfo const& hw_info,
uint32_t mma_warp_groups,
const uint32_t epilogue_subtile = 1) {
auto problem_shape_mnkl = cute::append<4>(problem_shape, 1);
ClusterShape cluster_shape;
TileShape tile_shape;
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape);
uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{})));
return Params::initialize_workspace(
workspace,
stream,
problem_blocks,
k_tile_per_output_tile,
to_gemm_coord(tile_shape),
to_gemm_coord(cluster_shape),
hw_info,
args.splits,
args.max_swizzle_size,
args.raster_order,
args.decomposition_mode,
mma_warp_groups,
sizeof_bits<BarrierType>::value,
sizeof_bits<ElementAccumulator>::value,
epilogue_subtile
);
}
template <class ProblemShape>
CUTLASS_HOST_DEVICE
static int
get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape, TileShape) {
return work_tile_info.k_tile_count;
}
CUTLASS_HOST_DEVICE
static uint32_t
get_work_k_tile_start(WorkTileInfo const& work_tile_info) {
return work_tile_info.K_idx;
}
private:
// Sets the current stream-K work to compute within work_tile_info. If new_unit is true, work_tile_info
// is populated as a new unit of work. Otherwise, state existing in work_tile_info (e.g., remaining
// iterations) is used to find the next tile in the current work unit.
CUTLASS_DEVICE
static void
assign_work(
Params const& params,
uint64_t linear_idx,
WorkTileInfo& work_tile_info) {
uint64_t output_tile_id = linear_idx;
if (linear_idx >= params.units_per_problem_ * params.splits_) {
// Separate-reduction work
auto cluster_size = params.get_cluster_size();
// Divide up the linearized separate reduction units into clusters
auto cluster_linear_reduction_unit_idx = params.div_cluster_size((linear_idx - params.units_per_problem_));
uint64_t cluster_tile_idx, epi_subtile_idx;
params.divmod_epilogue_subtile_(cluster_tile_idx, epi_subtile_idx, cluster_linear_reduction_unit_idx);
// Bring the linearized tile ID back into the space of tiles, rather than clusters
output_tile_id = cluster_tile_idx * cluster_size;
work_tile_info.setup_separate_reduction(epi_subtile_idx);
}
else if (linear_idx >= params.sk_units_ && params.splits_ == 1) {
// Data-parallel work
output_tile_id = linear_idx - params.sk_units_ + params.sk_tiles_;
work_tile_info.K_idx = 0;
work_tile_info.k_tile_count = params.divmod_tiles_per_output_tile_.divisor;
work_tile_info.k_tile_remaining = params.divmod_tiles_per_output_tile_.divisor;
}
else {
// In the CUTLASS 2.x implementation of stream K, stream-K work is assigned to each stream-K
// threadblock individually. For the most part, the set of K iterations corresponding to stream-K
// work was divided amongst stream-K threadblocks, and a threadblock determined which tile
// it would compute a (potentially-partial) output tile for based on the space of k iterations
// assigned to it. This often results in stream-K threadblocks processing tiles with different
      // offsets in the K dimension from one another. This can reduce locality, but is limited to the
// (generally few) waves of threadblocks assigned to compute stream-K work.
//
// With the introduction of threadblock clusters, there is additional benefit to maintaining
// locality in the K dimension: shared portions of operands can be multicasted to threadblocks
// within a cluster. Thus, we would like to ensure that the assignment of stream-K work to
// threadblocks respects the ability to perform multicasting.
//
// To do so, we divide up the linearized stream-K units into clusters and share the same K
// offsets for work within clusters.
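      // For example (hypothetical cluster of size 2): stream-K units 0..7 map to cluster-linear
      // indices 0,0,1,1,2,2,3,3, so the two CTAs of each cluster are assigned the same K range
      // and can multicast their shared operand tiles.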
auto cluster_linear_work_idx = params.div_cluster_size(linear_idx);
uint64_t group_idx;
params.divmod_sk_groups_(cluster_linear_work_idx, group_idx, cluster_linear_work_idx);
// Determine whether we are in a "big group" that will process an additional
// stream-K cluster tile.
auto sk_cluster_tiles = params.div_cluster_size(params.sk_tiles_);
auto sk_cluster_tiles_in_group = params.divmod_sk_groups_.divide(sk_cluster_tiles);
if (group_idx < params.big_groups_) {
++sk_cluster_tiles_in_group;
}
// Determine whether we are in a "big unit" within the group, that will process
// an additional K chunk in the group.
auto sk_tiles_in_group = sk_cluster_tiles_in_group * params.get_cluster_size();
auto k_tiles_in_group = sk_tiles_in_group * params.divmod_tiles_per_output_tile_.divisor;
auto k_tiles_per_unit_in_group = params.divmod_sk_units_per_group_.divide(k_tiles_in_group);
auto big_units_in_group = params.div_cluster_size(
k_tiles_in_group - (k_tiles_per_unit_in_group * params.divmod_sk_units_per_group_.divisor));
uint64_t split;
params.divmod_clusters_mnl_(split, cluster_linear_work_idx, cluster_linear_work_idx);
bool is_split_k = params.splits_ > 1;
auto big_unit_cmp_lhs = is_split_k ? split : cluster_linear_work_idx;
auto big_unit_cmp_rhs = is_split_k ? params.big_units_ : big_units_in_group;
auto linear_idx_mult = is_split_k ? params.divmod_tiles_per_output_tile_.divisor : k_tiles_per_unit_in_group;
auto k_tiles_per_split = is_split_k ? params.k_tiles_per_sk_unit_ : k_tiles_per_unit_in_group;
// Determine the starting k iteration computed by this stream-K work unit
uint32_t unit_iter_start = (linear_idx_mult * cluster_linear_work_idx) +
(k_tiles_per_split * split);
// Adjust the starting position and number of k iterations for "big units," which
// compute one extra iteration. If there are any big units, they will be the first
// in the linearized ID space.
auto k_tiles_in_my_split = k_tiles_per_split;
if (big_unit_cmp_lhs < big_unit_cmp_rhs) {
// Since the "big units" are the first units in the linearized ID space, each
// of the units preceding this big unit computed one extra iteration. Thus,
// we must offset our start iteration by the number of units that precede
// the current unit in the linearized ID space.
unit_iter_start += big_unit_cmp_lhs;
++k_tiles_in_my_split;
}
else {
// Increment by one for each of the big clusters (since all big units precede this unit)
unit_iter_start += big_unit_cmp_rhs;
}
if (!is_split_k) {
// Adjust the unit starting position and number of tiles to avoid
// computing splits of size less than min_iters_per_sk_unit_
int unused, start_tile_k_tile;
params.divmod_tiles_per_output_tile_(unused, start_tile_k_tile, unit_iter_start);
if (start_tile_k_tile < Params::min_iters_per_sk_unit_) {
// Starting K tile is in range [0, Params::min_iters_per_sk_unit_), which means that another
// stream-K unit will be computing a split with fewer than Params::min_iters_per_sk_unit_ K tiles.
// Adjust our work to take over these K tiles.
unit_iter_start -= start_tile_k_tile;
k_tiles_in_my_split += start_tile_k_tile;
}
else if (start_tile_k_tile > (params.divmod_tiles_per_output_tile_.divisor - Params::min_iters_per_sk_unit_)) {
// Starting K tile is within the final Params::min_iters_per_sk_unit_ K tiles of some output tile,
// which means that this unit will compute a split with fewer than Params::min_iters_per_sk_unit_ K tiles.
// Adjust our work to shed these K tiles to a neighboring stream-K unit that will compute more consecutive K tiles.
auto adjustment_tiles = (params.divmod_tiles_per_output_tile_.divisor - start_tile_k_tile);
unit_iter_start += adjustment_tiles;
k_tiles_in_my_split -= adjustment_tiles;
}
}
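// Illustrative example of the adjustment above (values are assumptions, not
// taken from Params): with 16 K tiles per output tile and
// min_iters_per_sk_unit_ == 4, a unit whose start falls on global iteration 34
// lands on K tile 2 of its output tile (34 % 16 == 2 < 4), so its start is
// pulled back to iteration 32 and its K-tile count grows by 2. A unit starting
// on K tile 14 (> 16 - 4) instead sheds 2 K tiles and starts 2 iterations later.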
if (work_tile_info.k_tile_count == 0) {
// This is a new unit
if (!is_split_k) {
//
// Adjust the unit ending position and number of tiles to avoid
// computing splits of size less than min_iters_per_sk_unit_
//
// Begin by assuming that no adjustment is needed
auto initial_unit_iter_end = unit_iter_start + k_tiles_in_my_split;
int unused, end_tile_k_tile;
params.divmod_tiles_per_output_tile_(unused, end_tile_k_tile, initial_unit_iter_end);
if (end_tile_k_tile < Params::min_iters_per_sk_unit_) {
// Ending K tile is within the first Params::min_iters_per_sk_unit_ K tiles of some output tile,
// which means that this unit will compute a split with fewer than Params::min_iters_per_sk_unit_ K tiles.
// Adjust our work to shed these K tiles to a neighboring stream-K unit that will compute more consecutive K tiles.
k_tiles_in_my_split -= end_tile_k_tile;
}
else if (end_tile_k_tile > (params.divmod_tiles_per_output_tile_.divisor - Params::min_iters_per_sk_unit_)) {
// Ending K tile is within the final Params::min_iters_per_sk_unit_ K tiles of some output tile,
// which means that some other unit will compute a split with fewer than Params::min_iters_per_sk_unit_ K tiles.
// Adjust our work to take on these K tiles.
k_tiles_in_my_split += (params.divmod_tiles_per_output_tile_.divisor - end_tile_k_tile);
}
}
work_tile_info.k_tile_remaining = k_tiles_in_my_split;
}
uint32_t unit_iter_end = unit_iter_start + work_tile_info.k_tile_remaining - 1;
// Find the output tile corresponding to the final k tile covered by this
// work unit. Stream-K work units will work backwards in terms of the tiles they
// are responsible for computing. This is beneficial because the final (partial)
// tile computed by a stream-K block is typically the beginning of the output
// tile, while the beginning (partial) tile is typically the ending of another
// output tile. Since ending portions of an output tile must reduce across
// other work units computing portions of that output tile, it is preferable
// for them to be computed later, so as to reduce the likelihood of blocking
// on other work.
auto output_tile_id_in_group = params.divmod_tiles_per_output_tile_.divide(unit_iter_end);
uint32_t output_tile_iter_start = output_tile_id_in_group * params.divmod_tiles_per_output_tile_.divisor;
uint32_t output_tile_iter_end = output_tile_iter_start + params.divmod_tiles_per_output_tile_.divisor;
// Convert the output tile from the linearized space within each group to the
// overall linearized space.
output_tile_id = (output_tile_id_in_group * params.divmod_sk_groups_.divisor) + group_idx;
// Bring the linearized tile ID back into the space of tiles, rather than clusters
output_tile_id *= params.get_cluster_size();
auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
// The final linearized tile ID is in units of the cluster dimension over which we rasterize.
if (params.raster_order_ == RasterOrder::AlongN) {
output_tile_id += cta_n_in_cluster * params.divmod_cluster_shape_minor_.divisor;
}
else {
output_tile_id += cta_m_in_cluster * params.divmod_cluster_shape_minor_.divisor;
}
// The unit's starting k iteration in the current tile is either the starting
// iteration for the tile as a whole, or the starting k iteration for the unit
// as a whole (if the latter is greater than the former).
uint32_t tile_iter_start = max(output_tile_iter_start, unit_iter_start);
// Similarly, the unit's ending k iteration (exclusive) is either the end of
// the current tile it is assigned, or the ending iteration of the unit as a whole
// (if the latter is less than the former).
uint32_t tile_iter_end = min(output_tile_iter_end, unit_iter_end + 1);
// Set the k offset to be the starting k tile for this output tile
work_tile_info.K_idx = static_cast<int32_t>(tile_iter_start - output_tile_iter_start);
work_tile_info.k_tile_count = tile_iter_end - tile_iter_start;
}
uint64_t work_idx_l, remainder;
params.divmod_batch_(work_idx_l, remainder, output_tile_id);
uint64_t cta_per_grid_dim = params.divmod_cluster_shape_minor_.divide(remainder);
auto [work_idx_m, work_idx_n] = UnderlyingScheduler::get_work_idx_m_and_n(
cta_per_grid_dim,
params.divmod_cluster_shape_major_,
params.divmod_cluster_shape_minor_,
params.divmod_cluster_blk_major_,
params.log_swizzle_size_,
params.raster_order_
);
// Set the M, N, and L block offsets
work_tile_info.M_idx = work_idx_m;
work_tile_info.N_idx = work_idx_n;
work_tile_info.L_idx = static_cast<int32_t>(work_idx_l);
}
// Returns the starting, current, and ending peer IDs of this tile
CUTLASS_HOST_DEVICE
static auto
tile_peer_range(Params const& params, uint32_t tile_idx, uint32_t cur_k_tile) {
auto tile_idx_in_cluster_path = params.div_cluster_size(tile_idx);
auto start_k_tile = params.divmod_tiles_per_output_tile_.divisor * tile_idx_in_cluster_path;
auto end_k_tile = start_k_tile + params.divmod_tiles_per_output_tile_.divisor - 1;
auto big_unit_k_tiles = params.big_units_ * (params.k_tiles_per_sk_unit_ + 1);
auto adjust_unit = [&](uint32_t k_tile, uint32_t unit_idx, uint32_t k_tiles_per_unit) {
auto unit_k_start = unit_idx * k_tiles_per_unit;
auto unit_k_end = unit_k_start + k_tiles_per_unit;
if (k_tile - start_k_tile < Params::min_iters_per_sk_unit_ &&
unit_k_end - start_k_tile < Params::min_iters_per_sk_unit_) {
// k_tile is within the first min_iters_per_sk_unit_ K tiles of this output tile,
// and the stream-K unit computes fewer than min_iters_per_sk_unit_ K tiles for this
// output tile. This work will thus be subsumed by the next stream-K unit.
++unit_idx;
}
if (end_k_tile + 1 - k_tile < Params::min_iters_per_sk_unit_ &&
end_k_tile + 1 - unit_k_start < Params::min_iters_per_sk_unit_) {
// k_tile is within the last min_iters_per_sk_unit_ K tiles of this output tile,
// and the stream-K unit computes fewer than min_iters_per_sk_unit_ K tiles for this
// output tile. This work will thus be subsumed by the previous stream-K unit.
--unit_idx;
}
return unit_idx;
};
// Lambda to find the ID of the stream-K unit that computes this K tile
auto find_unit = [&](uint32_t k_tile) {
if (k_tile < big_unit_k_tiles) {
// The tile is within the "big unit range"
auto k_tiles_per_unit = params.k_tiles_per_sk_unit_ + 1;
auto unit_idx = k_tile / k_tiles_per_unit;
return static_cast<uint64_t>(adjust_unit(k_tile, unit_idx, k_tiles_per_unit));
}
else {
// The tile is after the "big unit range." Account for this by finding the "normal unit"
// that it belongs to, and then offsetting by the number of big units
auto k_tiles_per_unit = params.k_tiles_per_sk_unit_;
auto unit_idx = ((k_tile - big_unit_k_tiles) / params.k_tiles_per_sk_unit_) + (params.big_units_);
return static_cast<uint64_t>(adjust_unit(k_tile, unit_idx, k_tiles_per_unit));
}
};
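// Illustrative example (values are assumptions, not taken from Params): with
// k_tiles_per_sk_unit_ == 4 and big_units_ == 2, big_unit_k_tiles == 10.
// K tiles 0-4 and 5-9 map to big units 0 and 1; K tile 12 falls past the big
// range and maps to unit (12 - 10) / 4 + 2 == 2, before any adjust_unit fixup.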
return cute::make_tuple(find_unit(start_k_tile), find_unit(cur_k_tile), find_unit(end_k_tile));
}
};
} // namespace cutlass::gemm::kernel::detail
| include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp",
"repo_id": "include",
"token_count": 14895
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex double-precision
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::ColumnMajor,
complex<double>, layout::RowMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<double>;
using LayoutB = layout::RowMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
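// Hedged usage sketch (not part of the original header): one plausible
// instantiation of the specialization above. The tile shapes below are
// illustrative assumptions chosen to satisfy the divisibility static_asserts;
// GemmShape<8, 8, 4> is the double-precision Tensor Core MMA shape on SM80.
//
//   using Core = cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,   // warp tile
//       cutlass::gemm::GemmShape<8, 8, 4>,      // instruction shape
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::RowMajor,
//       cutlass::complex<double>, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       3,                                      // stages
//       cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex,
//       cutlass::arch::CacheOperation::Always, cutlass::arch::CacheOperation::Always>;
//
// Core::MmaPolicy can then be used to assemble a threadblock-scoped multistage MMA.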
/// Partial specialization for complex double-precision
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::ColumnMajor,
complex<double>, layout::ColumnMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<double>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
using Operator = Operator_;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex double-precision
///
/// A: row-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::RowMajor,
complex<double>, layout::ColumnMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::RowMajor;
using ElementB = complex<double>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
/// Partial specialization for complex double-precision
///
/// A: row-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::RowMajor,
complex<double>, layout::RowMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::RowMajor;
using ElementB = complex<double>;
using LayoutB = layout::RowMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex floating-point
///
/// A: column-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::ColumnMajor,
complex<float>, layout::ColumnMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<float>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
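// Hedged usage sketch (not part of the original header): an illustrative
// instantiation of the complex<float> TF32 specialization above. Tile shapes
// are assumptions only; the instruction shape is fixed to GemmShape<16, 8, 8>
// by this partial specialization.
//
//   using Core = cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,   // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,     // mma.sync m16n8k8 TF32 shape
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,
//       cutlass::complex<float>, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       3,                                      // stages
//       cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex,
//       cutlass::arch::CacheOperation::Always, cutlass::arch::CacheOperation::Always>;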
/// Partial specialization for complex floating-point
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::ColumnMajor,
complex<float>, layout::RowMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<float>;
using LayoutB = layout::RowMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex floating-point
///
/// A: row-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::RowMajor,
complex<float>, layout::ColumnMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::RowMajor;
using ElementB = complex<float>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex floating-point
///
/// A: row-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::RowMajor,
complex<float>, layout::RowMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::RowMajor;
using ElementB = complex<float>;
using LayoutB = layout::RowMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex SIMT operation
///
/// A: column-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::ColumnMajor,
complex<RealB>, layout::ColumnMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// These should also be capped by the thread tile dimensions
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
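// Hedged usage sketch (not part of the original header): an illustrative
// instantiation of the SIMT specialization above, e.g. for complex<double>
// without Tensor Cores. Tile shapes are assumptions only; the instruction
// shape is fixed to GemmShape<1, 1, 1> by this partial specialization.
//
//   using Core = cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore<
//       cutlass::gemm::GemmShape<64, 64, 8>,    // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 8>,    // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,      // SIMT "instruction" shape
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassSimt,
//       3,                                      // stages
//       cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex,
//       cutlass::arch::CacheOperation::Always, cutlass::arch::CacheOperation::Always>;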
/// Partial specialization for complex SIMT operation
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::ColumnMajor,
complex<RealB>, layout::RowMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::RowMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// These should also be capped by the thread tile dimensions
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>, // or Shape::kK / 32
WarpCount::kK>;
};
/// Partial specialization for complex SIMT operation
///
/// A: row-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::RowMajor,
complex<RealB>, layout::ColumnMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::RowMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
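// Worked example (illustrative note, not part of the original header): for a
// hypothetical threadblock tile Shape = GemmShape<64, 64, 16> and warp tile
// WarpShape = GemmShape<32, 32, 16>, the quantities above evaluate to
//
//   WarpCount = GemmShape<64/32, 64/32, 16/16> = GemmShape<2, 2, 1>
//   kThreads  = WarpCount::kCount * kWarpSize = 4 * 32 = 128
//
// which satisfies both the divisibility and the two-warp static_asserts.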
/// Partial specialization for complex SIMT operation
///
/// A: row-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::RowMajor,
complex<RealB>, layout::RowMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::RowMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::RowMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4;
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, 0>, // or Shape::kK / 32
WarpCount::kK>;
};
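// Illustrative instantiation (a sketch, not part of the original header): the
// commented alias below shows how the row-major x row-major SIMT specialization
// above might be selected. The tile sizes, stage count, and the alias name
// ExampleComplexSimtCore are hypothetical.
//
//   using ExampleComplexSimtCore = DefaultMultistageMmaComplexCore<
//       GemmShape<64, 64, 16>,               // threadblock tile
//       GemmShape<32, 32, 16>,               // warp tile
//       GemmShape<1, 1, 1>,                  // instruction shape (SIMT)
//       complex<float>, layout::RowMajor,    // A
//       complex<float>, layout::RowMajor,    // B
//       complex<float>, layout::RowMajor,    // C (accumulator)
//       arch::OpClassSimt,
//       3,                                   // stages
//       ComplexTransform::kNone, ComplexTransform::kNone,
//       arch::OpMultiplyAddComplex,
//       arch::CacheOperation::Always, arch::CacheOperation::Always>;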
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h",
"repo_id": "include",
"token_count": 22422
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Used for partial specialization
typename Enable = bool
>
class MmaSingleStage : public MmaBase<Shape_, Policy_, 1> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, 1>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
using ArchTag = arch::Sm70;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert kStages for MmaSingleStage is 1 (single-stage mma pipeline)
static_assert((Base::kStages==1), "MmaSingleStage requires kStages set to value 1");
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaSingleStage(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx ///< ID of each thread within a warp
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
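  // Worked example (illustrative note, not part of the original header): with
  // Base::WarpCount = <2, 2, 2> and warp_idx = 5, the decomposition above gives
  //
  //   warp_idx_mn = 5 % 4 = 1,   warp_idx_k = 5 / 4 = 1
  //   warp_idx_m  = 1 % 2 = 1,   warp_idx_n = 1 / 2 = 0
  //
  // i.e. this warp owns the (m = 1, n = 0) warp tile and the second partition
  // along the K dimension.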
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
    FragmentC const &src_accum) {            ///< source accumulator tile
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
tb_frag_A.clear();
tb_frag_B.clear();
// The last kblock is loaded in the prolog
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A;
WarpFragmentB warp_frag_B;
Operator warp_mma;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations <= 1);
iterator_B.clear_mask(gemm_k_iterations <= 1);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
this->smem_iterator_A_.store(tb_frag_A);
this->smem_iterator_B_.store(tb_frag_B);
__syncthreads();
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A);
this->warp_tile_iterator_B_.load(warp_frag_B);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
warp_mma(accum, warp_frag_A, warp_frag_B, accum);
}
// Add negative offsets to return smem load iterators to the 'start' of the shared memory
this->warp_tile_iterator_A_.add_tile_offset({0, -Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset({-Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
__syncthreads();
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations <= 2);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
}
};
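// Illustrative call pattern (a hypothetical sketch, not part of this header):
// threadblock-scoped GEMM kernels typically construct the mma from shared
// storage and invoke it with global-memory tile iterators. The names Mma,
// iterator_A, iterator_B, and the index variables below are placeholders.
//
//   __shared__ typename Mma::SharedStorage shared_storage;
//   Mma mma(shared_storage, thread_idx, warp_idx, lane_idx);
//   typename Mma::FragmentC accum;
//   accum.clear();
//   mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);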
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/mma_singlestage.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_singlestage.h",
"repo_id": "include",
"token_count": 3396
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/functional.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
namespace detail {
template <
/// Data type of real & imag members of complex numbers in the SourceFragment
typename RealElement,
/// Destination fragment required by the mma operation
typename DestinationFragment,
/// Source fragment holding complex<RealElement> elements
typename SourceFragment,
/// Number of mma operations performed
typename MmaIterations,
/// Shape of operand elements
typename MmaOperandShape,
/// Complex transform on A operand
ComplexTransform Transform_,
/// Operand A or Operand B
Operand Operand_,
/// Floating-point rounding style for big part
FloatRoundStyle RoundBig_,
/// Floating-point rounding style for small part
FloatRoundStyle RoundSmall_>
struct UnpackComplexConvertAndPackForMmaFastF32;
// Partial specialization for OperandA and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle RoundBig_,
FloatRoundStyle RoundSmall_>
struct UnpackComplexConvertAndPackForMmaFastF32 <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kA,
RoundBig_,
RoundSmall_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kA;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRoundBig = RoundBig_;
static FloatRoundStyle const kRoundSmall = RoundSmall_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
  // Numeric converter MmaElementBig, MmaElementSmall <= RealElement
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
// Operand layout parameters
using SourceFragmentLayout = layout::ColumnMajor;
static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow;
// BigSmall Fragment holding two TF32 elements (big, small) for every float
using BigSmallFragment = Array<MmaElement, 2>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMmaFastF32() {}
CUTLASS_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
DestinationFragment *dest_big_ = reinterpret_cast<DestinationFragment*>(dest);
DestinationFragment *dest_small_ = reinterpret_cast<DestinationFragment*>(&dest[MmaIterations::kRow * 2]);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kRow; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r + i * MmaOperandShape::kRow;
int col = c;
// Access complex<RealElement> and apply rounding on real and imag parts
BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real());
BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_big_[i][pos] = a[kBigIndex];
dest_big_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]);
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_small_[i][pos] = a[kSmallIndex];
dest_small_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]);
// Next position
pos++;
}
}
}
}
};
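// Note (illustrative, not part of the original header): conceptually, for every
// float x the converter above produces a (big, small) pair of TF32 values with
//
//   x_big   ~ x rounded to TF32 precision
//   x_small ~ (x - float(x_big)) rounded to TF32 precision
//
// so that x ~ x_big + x_small. The loops apply this split independently to the
// real and imaginary parts of each complex<RealElement>: the big halves fill the
// first 2 * MmaIterations::kRow destination fragments and the small halves fill
// the following ones.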
// Partial specialization for OperandB and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle RoundBig_,
FloatRoundStyle RoundSmall_>
struct UnpackComplexConvertAndPackForMmaFastF32 <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kB,
RoundBig_,
RoundSmall_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kB;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRoundBig = RoundBig_;
static FloatRoundStyle const kRoundSmall = RoundSmall_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
  // Numeric converter MmaElementBig, MmaElementSmall <= RealElement
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
// Operand layout parameters
using SourceFragmentLayout = layout::RowMajor;
static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn;
// BigSmall Fragment holding two TF32 elements (big, small) for every float
using BigSmallFragment = Array<MmaElement, 2>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMmaFastF32() {}
CUTLASS_HOST_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
DestinationFragment *dest_big_ = reinterpret_cast<DestinationFragment*>(dest);
DestinationFragment *dest_small_ = reinterpret_cast<DestinationFragment*>(&dest[MmaIterations::kColumn * 2]);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kColumn; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r;
int col = c + i * MmaOperandShape::kColumn;
          // Access complex<RealElement> and apply rounding on real and imag parts
BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real());
BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_big_[i][pos] = a[kBigIndex];
dest_big_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]);
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_small_[i][pos] = a[kSmallIndex];
dest_small_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]);
// next position
pos++;
}
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool
>
class MmaComplexTensorOpFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<float>
// Rounding: float -> tfloat32_t (round half_ulp_truncate nearest)
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB,
/// Used for partial specialization
typename Enable
>
class MmaComplexTensorOpFastF32<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
Policy_,
TransformA,
TransformB,
Enable> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = float;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = float;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = float;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddComplexFastF32;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
  /// Tune the F32-to-TF32 big/small conversion for the complex<float> operation.
  /// Different combinations of big/small rounding styles trade off speed
  /// against accuracy. Generally, round_half_ulp_truncate improves performance
  /// but hurts accuracy.
using ComplexFastF32 = FastF32 <
FloatRoundStyle::round_toward_zero, // kRoundBigA
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA
FloatRoundStyle::round_toward_zero, // kRoundBigB
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB
TensorFloat32Op::k3xTF32 // Number of TF32 operations
>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
// (4 times the original FragmentA::kElements)
// (real_big), (imag_big), (real_small), (imag_small)
using TransformedFragmentA = Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements * 2 * 2>;
// Fragment bisecting big and small sections
// (real_big, imag_big), (real_small, imag_small)
using AccessTypeFragmentA = Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements * 2>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
// (4 times the original FragmentB::kElements)
// (real_big), (imag_big), (real_small), (imag_small)
using TransformedFragmentB = Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements * 2 * 2>;
// Fragment bisecting big and small sections
// (real_big, imag_big), (real_small, imag_small)
using AccessTypeFragmentB = Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements * 2>;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
  /// Number of complex product operations performed (one complex product needs four mma instructions)
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
//
// Alias types for underlying real-valued matrix multiply operator
//
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value,
"This implementation only supports mma.m16n8k8 math instructions.");
static_assert(InstMmaOperandA::kElements == 4,
"This implementation only supports math instructions in which exactly four element is needed for the A operand."
"We can geneneralize later.");
static_assert(InstMmaOperandB::kElements == 2,
"This implementation only supports math instructions in which exactly two element is needed for the B operand."
"We can geneneralize later.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOpFastF32() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
AccessTypeFragmentA const *complex_A = reinterpret_cast<AccessTypeFragmentA const*>(&A);
AccessTypeFragmentB const *complex_B = reinterpret_cast<AccessTypeFragmentB const*>(&B);
//
// Accumulate in place
//
D = C;
complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kBigIndex], D);
complex_mma_operator(D, complex_A[kBigIndex], complex_B[kSmallIndex], D);
complex_mma_operator(D, complex_A[kBigIndex], complex_B[kBigIndex], D);
if (ComplexFastF32::kPrecision == TensorFloat32Op::k4xTF32)
complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kSmallIndex], D);
}
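  // Derivation (illustrative note, not part of the original header): writing
  // each f32 operand as big + small, the product expands to
  //
  //   (A_big + A_small) * (B_big + B_small)
  //     = A_big*B_big + A_small*B_big + A_big*B_small + A_small*B_small
  //
  // The three calls above accumulate the first three terms (k3xTF32); the
  // smallest-magnitude term A_small*B_small is added only when kPrecision is
  // TensorFloat32Op::k4xTF32.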
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void complex_mma_operator(
FragmentC &D,
AccessTypeFragmentA const &complex_A,
AccessTypeFragmentB const &complex_B,
FragmentC const &C
) const {
// Instruction Operands A & B holding real part followed by imaginary part for mma operations
InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&complex_A);
InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&complex_B);
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m], operand_B[n], *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum);
}
// mma(accum.real(), a.imag(), -b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// negate OperandB to accumulate -(a.imag()*b.imag())
        // negating OperandB emits fewer instructions than negating OperandA as OperandB has fewer elements
negate<InstMmaOperandB> negate_op;
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum);
}
}
}
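  // Note (illustrative, not part of the original header): the four unrolled
  // loops above together implement one complex multiply-accumulate per
  // real-valued mma tile,
  //
  //   accum.real += a.real * b.real - a.imag * b.imag
  //   accum.imag += a.real * b.imag + a.imag * b.real
  //
  // with real-part accumulators stored in the first MmaIterations::kCount
  // MmaOperandC fragments of D and imaginary-part accumulators in the next
  // MmaIterations::kCount fragments.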
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
detail::UnpackComplexConvertAndPackForMmaFastF32 <
RealElementA,
InstMmaOperandA,
FragmentA,
MmaIterations,
MatrixShape<2, 2>,
kTransformA,
Operand::kA,
ComplexFastF32::kRoundBigA,
ComplexFastF32::kRoundSmallA> convert_A;
detail::UnpackComplexConvertAndPackForMmaFastF32 <
RealElementB,
InstMmaOperandB,
FragmentB,
MmaIterations,
MatrixShape<2, 1>,
kTransformB,
Operand::kB,
ComplexFastF32::kRoundBigB,
ComplexFastF32::kRoundSmallB> convert_B;
// Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element
convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A);
convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h",
"repo_id": "include",
"token_count": 8071
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaTensorOpMultiplicandTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
64>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, 64>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = Layout::kElementsPerAccess;
static int const kLdsmOpInner = 8;
static_assert(!(Shape::kContiguous % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeStrided =
InstructionShape::kStrided / kLdsmOpInner;
static int const LdsmShapeContiguous = 4 / LdsmShapeStrided;
using LdsmShape =
layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
/// Number and arrangement of LDSM instructions
using LdsmIterations = layout::PitchLinearShape<
Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous,
1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kStrided / InstructionShape::kStrided;
};
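  // Worked example (illustrative note, not part of the original header): since
  // kLdsmOpInner == 8, the LDSM footprint is determined by the instruction's
  // strided (K) extent:
  //
  //   mma.m16n8k8  A/B : InstructionShape::kStrided = 8  -> LdsmShape = <4, 1>
  //   mma.m16n8k16 A   : InstructionShape::kStrided = 16 -> LdsmShape = <2, 2>
  //
  // matching the LdsmShape::kContiguous cases handled in the constructor below.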
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount =
Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous;
/// Pointer type used for accesses
using AccessType = Array<Element, Layout::kElementsPerAccess>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
int quad_pair = (lane_id >> 3);
int quad_quad = (lane_id >> 4);
int lane_in_quad = (lane_id & 3);
int lane_in_quad_pair = (lane_id & 7);
int lane_in_quad_quad = (lane_id & 15);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
int partition_contiguous_idx = -1;
int access_contiguous_idx = -1;
int access_strided_idx = -1;
if (Policy::LdsmShape::kContiguous == 4) {
// Matrix multiply 1688 A/B
        // Q0 Q1 Q2 Q3 (Q stands for one 8x128-bit block).
// Four blocks are next to each other in the contiguous dimension.
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ i);
access_contiguous_idx = (quad_pair ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair;
} else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kA) {
// Matrix multiply 16816 A
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1));
access_contiguous_idx =
(((quad_pair & 1) + ((i & 1) << 1)) ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3);
} else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kB) {
// Matrix multiply 16816 B
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1));
access_contiguous_idx = ((quad_quad + ((i & 1) << 1)) ^ lane_in_quad);
access_strided_idx = lane_in_quad_quad;
} else if (Policy::LdsmShape::kContiguous == 1) {
// Matrix multiply 16832.SP B
// Q0
// Q1
// Q2
// Q3
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 2));
access_contiguous_idx = ((i & 3) ^ lane_in_quad);
access_strided_idx = lane_id;
}
int access_contiguous =
partition_contiguous_idx * Layout::PartitionShape::kContiguous +
access_contiguous_idx;
int access_strided = access_strided_idx;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
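  // Worked example (illustrative note, not part of the original header): for
  // lane_id = 13 the decompositions above yield
  //
  //   quad_pair = 1, quad_quad = 0, lane_in_quad = 1,
  //   lane_in_quad_pair = 5, lane_in_quad_quad = 13
  //
  // which, together with the pointer index i, select the Q block and the row
  // within it that this thread reads.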
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
if (Shape::kContiguous ==
Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) {
if (tile_offset.contiguous() % 2) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount / 2; ++i) {
AccessType const *tmp_pointer = pointer_[i];
pointer_[i] = pointer_[i + kPointerCount / 2];
pointer_[i + kPointerCount / 2] = tmp_pointer;
}
}
contiguous_offset = (tile_offset.contiguous() >> 1) << 1;
}
int offset = (tile_offset.strided() * InstructionShape::kStrided) *
stride_ * Layout::kElementsPerAccess +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_[c % kPointerCount] +
Layout::TileShape::kContiguous * (c / kPointerCount) +
Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx],
source_byte_ptr
);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread MMA.TF32 NT TensorOps. It
/// uses LDS.32 to load from shared memory and therefore must be initialized
/// with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous<32, 32>, InstructionShape_,
OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<32, 32>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine the number of elements along the outer dimension per individual
// 32-bit shared memory load. One warp-wide 32-bit shared memory load fetches
// an 8x4 block of elements.
static int const kLdsOpInner = Layout::TileShape::kStrided;
static int const kLdsOpOuter = kThreads / kLdsOpInner;
static_assert(!(Shape::kContiguous % kLdsOpOuter),
"Shape of warp-level mma must be divisible by 32bit "
"fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsOpInner),
"Shape of warp-level mma must be divisible by 32bit "
"fundamental tile size.");
/// Number and arrangement of 32-bit shared memory load instructions needed by one MMA instruction:
///   1688 A:  2x2
///   1688 B:  1x2
///   16816 B: 1x4
static int const LdsShapeContiguous =
InstructionShape::kContiguous / kLdsOpOuter;
static int const LdsShapeStrided = InstructionShape::kStrided / kLdsOpInner;
using LdsShape =
layout::PitchLinearShape<LdsShapeContiguous, LdsShapeStrided>;
/// Number and arrangement of LDS instructions
using LdsIterations = layout::PitchLinearShape<
Shape::kContiguous / LdsShapeContiguous / kLdsOpOuter, 1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kStrided / InstructionShape::kStrided;
};
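// Worked example (illustrative only; the concrete values depend on the layout
// specialization and on the warp tile chosen by the enclosing kernel). Assume
// Layout::TileShape::kStrided == 4 and Layout::kElementsPerAccess == 1, and
// consider the A operand of a 1688 TF32 MMA with a pitch-linear
// InstructionShape of <16, 8> and a warp tile Shape of <64, 32>:
//
//   kLdsOpInner        = 4             kLdsOpOuter     = 32 / 4 = 8
//   LdsShapeContiguous = 16 / 8 = 2    LdsShapeStrided = 8 / 4  = 2
//   LdsIterations      = <64 / 2 / 8, 1> = <4, 1>
//   kGroupsPerTile     = 32 / 8 = 4
//
// i.e. each MMA instruction consumes a 2x2 arrangement of LDS.32 accesses (the
// "1688 A 2x2" case listed above), repeated 4 times along the contiguous dimension.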
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount = Layout::TileShape::kContiguous *
Layout::kElementsPerAccess /
Policy::kLdsOpOuter;
/// Vectorized access is not used
static int const kElementsPerAccess = 1;
/// Pointer type used for accesses
using AccessType = Element;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() : stride_(0), byte_offset_(0), k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: stride_(ref.stride(0)), byte_offset_(0), k_group_idx_(0) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
int access_strided = lane_id % Policy::kLdsOpInner;
int access_contiguous = (lane_id / Policy::kLdsOpInner) +
(access_strided ^ i) * Policy::kLdsOpOuter;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
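// Illustrative trace of the pointer setup above, assuming kLdsOpInner == 4 and
// kLdsOpOuter == 8 (values depend on the layout specialization). For lane 13:
//   access_strided = 13 % 4 = 1
//   i = 0: access_contiguous = 13 / 4 + (1 ^ 0) * 8 = 3 + 8  = 11
//   i = 1: access_contiguous = 3 + (1 ^ 1) * 8 = 3
//   i = 2: access_contiguous = 3 + (1 ^ 2) * 8 = 27
//   i = 3: access_contiguous = 3 + (1 ^ 3) * 8 = 19
// The XOR of the strided index with the pointer index i produces the swizzled
// contiguous offsets that match the layout's bank-conflict-avoiding arrangement.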
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
if (Shape::kContiguous ==
Layout::TileShape::kContiguous * Layout::kElementsPerAccess / 2) {
if (tile_offset.contiguous() % 2) {
// Matrix multiply 1688: swap pointer_[0] <=> pointer_[4], pointer_[1] <=> pointer_[5],
// pointer_[2] <=> pointer_[6], pointer_[3] <=> pointer_[7]
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount / 2; ++i) {
AccessType const *tmp_pointer = pointer_[i];
pointer_[i] = pointer_[i + kPointerCount / 2];
pointer_[i + kPointerCount / 2] = tmp_pointer;
}
}
contiguous_offset = (tile_offset.contiguous() >> 1) << 1;
}
int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Element *fetch_ptr = reinterpret_cast<Element *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ss = 0; ss < Policy::LdsShape::kStrided; ++ss) {
CUTLASS_PRAGMA_UNROLL
for (int cc = 0; cc < Policy::LdsShape::kContiguous; ++cc) {
int access_idx =
cc + (ss + (c + s * Policy::LdsIterations::kContiguous) *
Policy::LdsShape::kStrided) *
Policy::LdsShape::kContiguous;
int access_idx_contiguous = cc + c * Policy::LdsShape::kContiguous;
int access_idx_strided =
(ss + s * Policy::LdsShape::kStrided) * Policy::kLdsOpInner;
AccessType const *source_ptr =
pointer_[access_idx_contiguous % kPointerCount] +
Layout::TileShape::kContiguous * Layout::kElementsPerAccess *
(access_idx_contiguous / kPointerCount) +
access_idx_strided * stride_;
char const *source_byte_ptr =
reinterpret_cast<char const *>(source_ptr) + byte_offset +
byte_offset_;
fetch_ptr[access_idx] =
*reinterpret_cast<Element const *>(source_byte_ptr);
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
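// Illustrative usage sketch (comment only; `WarpIterator` and `smem_ref` are
// hypothetical names, not part of this header). A warp-level mainloop typically
// constructs the iterator from a TensorRef into shared memory and advances it
// once per k-group:
//
//   WarpIterator iter(smem_ref, lane_id);      // smem_ref points into shared memory
//   typename WarpIterator::Fragment frag;
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < WarpIterator::Policy::kGroupsPerTile; ++k) {
//     iter.load(frag);   // each thread loads its part of the current k-group
//     ++iter;            // advance to the next k-group (wraps to the next tile)
//     /* issue the warp-level mma with frag ... */
//   }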
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps whose warp tile
/// spans 64B along the contiguous dimension. It assumes the threadblock's
/// contiguous dimension has the same size as the warp tile. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// This specialization could be merged into the general one; most of the code is the same.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous<16, 32>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Number of elements along the crosswise dimension of the layout
static int const kCrosswise = 32;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = Layout::kElementsPerAccess;
static int const kLdsmOpInner = 8;
static_assert(!(Shape::kContiguous % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeStrided =
InstructionShape::kStrided / kLdsmOpInner;
static int const LdsmShapeContiguous = 4 / LdsmShapeStrided;
using LdsmShape =
layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
/// Number and arrangement of LDSM instructions
using LdsmIterations = layout::PitchLinearShape<
Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous,
1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kStrided / InstructionShape::kStrided;
};
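// Worked example (illustrative only). Assume a 16-bit element type so that
// Layout::kElementsPerAccess == 8 (128-bit LDSM accesses), and consider the A
// operand of a 16816 MMA with a pitch-linear InstructionShape of <16, 16> and a
// warp tile Shape of <32, 32>:
//
//   kLdsmOpOuter        = 8             kLdsmOpInner        = 8
//   LdsmShapeStrided    = 16 / 8 = 2    LdsmShapeContiguous = 4 / 2 = 2
//   LdsmIterations      = <32 / 8 / 2, 1> = <2, 1>
//   kGroupsPerTile      = 32 / 16 = 2
//
// i.e. each MMA uses one LDSM.x4 arranged as a 2x2 block of 8x8-element
// matrices, and two such iterations cover the 32-element contiguous extent.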
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount =
Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous / Layout::kFactor;
/// Pointer type used for accesses
using AccessType = Array<Element, Layout::kElementsPerAccess>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
int quad_pair = (lane_id >> 3);
int quad_quad = (lane_id >> 4);
//int lane_in_quad = (lane_id & 3);
int lane_in_quad_pair = (lane_id & 7);
int lane_in_quad_quad = (lane_id & 15);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
int partition_contiguous_idx = -1;
int access_contiguous_idx = -1;
int access_strided_idx = -1;
if (Policy::LdsmShape::kContiguous == 4) {
// Matrix multiply 1688 A/B
// Q0 Q1 Q2 Q3 (Q stands for 1 8x128bit block).
// Four blocks are next to each other in the contiguous dimension.
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx = quad_pair ^ (lane_in_quad_pair / Layout::kFactor);
access_strided_idx = lane_in_quad_pair / Layout::kFactor;
} else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kA) {
// Matrix multiply 16816 A
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
(((quad_pair & 1) + i * 2) ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx = (lane_in_quad_pair + (lane_id >> 4 << 3)) / 2;
} else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kB) {
// Matrix multiply 16816 B
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx = (quad_quad + i * 2) ^ (lane_in_quad_pair / Layout::kFactor);
access_strided_idx = (lane_in_quad_quad / Layout::kFactor);
}
int access_contiguous =
partition_contiguous_idx * Layout::PartitionShape::kContiguous +
access_contiguous_idx;
int access_strided = access_strided_idx;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
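// Illustrative decomposition of lane_id used above (pure arithmetic). For lane 21:
//   quad_pair         = 21 >> 3 = 2     quad_quad         = 21 >> 4 = 1
//   lane_in_quad_pair = 21 & 7  = 5     lane_in_quad_quad = 21 & 15 = 5
// These indices select which 8x128-bit block (Q0..Q3) and which row within it a
// lane addresses; the XOR terms above fold in the layout's swizzle along the
// contiguous dimension.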
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
if (Shape::kContiguous ==
Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) {
if (tile_offset.contiguous() % 2) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount / 2; ++i) {
AccessType const *tmp_pointer = pointer_[i];
pointer_[i] = pointer_[i + kPointerCount / 2];
pointer_[i + kPointerCount / 2] = tmp_pointer;
}
}
contiguous_offset = (tile_offset.contiguous() >> 1) << 1;
}
int offset = (tile_offset.strided() * InstructionShape::kStrided) *
stride_ * Layout::kElementsPerAccess / Layout::kFactor +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_[c % kPointerCount] +
Layout::TileShape::kContiguous * (c / kPointerCount) +
Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_ / Layout::kFactor;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx],
source_byte_ptr
);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_ / Layout::kFactor;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps whose warp tile
/// spans 32B along the contiguous dimension. It assumes the threadblock's
/// contiguous dimension has the same size as the warp tile. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// This specialization could be merged into the general one; most of the code is the same.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous<16, 16>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Number of elements along the crosswise dimension of the layout
static int const kCrosswise = 16;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = Layout::kElementsPerAccess;
static int const kLdsmOpInner = 8;
static_assert(!(Shape::kContiguous % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeStrided =
InstructionShape::kStrided / kLdsmOpInner;
static int const LdsmShapeContiguous = 4 / LdsmShapeStrided;
using LdsmShape =
layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
/// Number and arrangement of LDSM instructions
using LdsmIterations = layout::PitchLinearShape<
Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous,
1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kStrided / InstructionShape::kStrided;
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount =
Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous / Layout::kFactor;
/// Pointer type used for accesses
using AccessType = Array<Element, Layout::kElementsPerAccess>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
//int quad_pair = (lane_id >> 3);
int quad_quad = (lane_id >> 4);
int lane_in_pair = (lane_id & 1);
int lane_in_quad = (lane_id & 3);
int lane_in_quad_pair = (lane_id & 7);
int lane_in_quad_quad = (lane_id & 15);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
int partition_contiguous_idx = -1;
int access_contiguous_idx = -1;
int access_strided_idx = -1;
if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kA) {
// Matrix multiply 16816 A
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = lane_in_quad / 2;
access_strided_idx = lane_in_quad_pair / Layout::kFactor + quad_quad * 2;
access_contiguous_idx =
((lane_in_pair * 2 + ((lane_id & 8) >> 3)) ^
access_strided_idx);
} else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kB) {
// Matrix multiply 16816 B
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = lane_in_quad / 2;
access_strided_idx = lane_in_quad_quad / Layout::kFactor;
access_contiguous_idx =
((lane_in_pair * 2 + quad_quad) ^
access_strided_idx);
}
int access_contiguous =
partition_contiguous_idx * Layout::PartitionShape::kContiguous +
access_contiguous_idx;
int access_strided = access_strided_idx;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
if (Shape::kContiguous ==
Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) {
if (tile_offset.contiguous() % 2) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount / 2; ++i) {
AccessType const *tmp_pointer = pointer_[i];
pointer_[i] = pointer_[i + kPointerCount / 2];
pointer_[i + kPointerCount / 2] = tmp_pointer;
}
}
contiguous_offset = (tile_offset.contiguous() >> 1) << 1;
}
int offset = (tile_offset.strided() * InstructionShape::kStrided) *
stride_ * Layout::kElementsPerAccess / Layout::kFactor +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_[c % kPointerCount] +
Layout::TileShape::kContiguous * (c / kPointerCount) +
Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_ / Layout::kFactor;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx],
source_byte_ptr
);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_ / Layout::kFactor;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Element number when the layout crosses (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA,
"MmaTensorOpMultiplicandIterator for ColumnMajor Congruous may "
"only be instantiated for A operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// MBlock or NBlock size
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
kCrosswise>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.row(), tile_offset.column()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Element number when the layout crosses (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator for RowMajor Congruous may "
"only be instantiated for B operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// Number of elements along the crosswise dimension of the layout
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
kCrosswise>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.column(), tile_offset.row()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
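// Note on the two adapters above (illustrative): both simply remap a MatrixCoord
// tile offset onto the underlying pitch-linear iterator. For the column-major A
// adapter, a tile offset of {2, 1} (row, column) is forwarded as pitch-linear
// {2, 1} (contiguous, strided); for the row-major B adapter, the same {2, 1}
// (row, column) is forwarded as {1, 2}, since its contiguous dimension is the
// column dimension.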
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Element number when the layout crosses (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Number of elements along the crosswise dimension of the layout
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = Layout::kElementsPerAccess;
static int const kLdsmOpInner = 8;
static_assert(!(Shape::kContiguous % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeContiguous =
InstructionShape::kContiguous / kLdsmOpOuter;
static int const LdsmShapeStrided =
((4 / LdsmShapeContiguous * kLdsmOpInner) > Shape::kStrided)
? (Shape::kStrided / kLdsmOpInner)
: (4 / LdsmShapeContiguous);
using LdsmShape =
layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
/// Number and arrangement of LDSM instructions
using LdsmIterations =
layout::PitchLinearShape<1, Shape::kStrided / kLdsmOpInner /
LdsmShape::kStrided>;
/// Number of k-groups for each tile
static int const kGroupsPerTile = Layout::TileShape::kContiguous /
Layout::kFactor / LdsmShape::kContiguous;
};
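// Worked example of the LDSM shape selection above (illustrative only). Assume
// a 16-bit element type so Layout::kElementsPerAccess == 8, and the A operand of
// a 16816 MMA in pitch-linear terms (InstructionShape = <16, 16>; K is the
// contiguous dimension for crosswise layouts), with a warp tile Shape = <32, 64>:
//
//   LdsmShapeContiguous = 16 / 8 = 2
//   4 / 2 * 8 = 16 is not greater than Shape::kStrided (64), so
//   LdsmShapeStrided    = 4 / 2 = 2        -> LdsmShape = <2, 2> (LDSM.x4)
//   LdsmIterations      = <1, 64 / 8 / 2> = <1, 4>
//
// If the warp tile's strided extent were only 8, the guard would clamp
// LdsmShapeStrided to 8 / 8 = 1 so the LDSM footprint never exceeds the tile.
// kGroupsPerTile additionally depends on the layout's TileShape and kFactor.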
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kStrided *
InstructionShape::kContiguous / kThreads>;
private:
/// Total number of sections. Memory is divided into stages; one stage stores
/// one tile and is itself divided into sections. Interleaved layouts may have
/// multiple sections per stage, while all other layouts have exactly one
/// section per stage.
int sections_;
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter used to determine when to increment byte offset and when
/// to XOR it
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator()
: pointer_(nullptr),
sections_(0),
stride_(0),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: pointer_(reinterpret_cast<AccessType const *>(ref.data())),
sections_(ref.stride(0) / kCrosswise),
// stride_ = kCrosswise x sections_ x kFactor
stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
// The warp-level iterator uses at most double buffering to hide latency. If
// there are more than 2 sections, every stage should contain more than one
// section.
// Turing silicon requires that all 32 threads in a warp provide valid
// addresses even for LDSM.1 and LDSM.2.
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 750))
lane_id = lane_id % (Policy::LdsmShape::kCount * Policy::kLdsmOpInner);
#endif
int quad_quad = (lane_id >> 4);
int quad_pair = (lane_id >> 3);
int lane_in_pair = (lane_id & 1);
int lane_in_quad = (lane_id & 3);
int lane_in_quad_pair = (lane_id & 7);
int lane_in_quad_quad = (lane_id & 15);
int partition_contiguous_idx = -1;
int access_contiguous_idx = -1;
int access_strided_idx = -1;
if (Layout::kFactor == 4) {
// Super Integer matrix multiply Interleaved-32
int factor_in_partition =
(Layout::PartitionShape::kContiguous * Layout::kFactor /
Layout::TileShape::kContiguous);
if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) {
// Integer matrix multiply 8816 A/B
partition_contiguous_idx = lane_in_quad / factor_in_partition;
access_contiguous_idx = ((lane_in_pair * factor_in_partition) ^
(lane_in_quad_quad / Layout::kFactor));
access_strided_idx = lane_id / Layout::kFactor;
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kA) {
// Integer matrix multiply 16832 A
partition_contiguous_idx = lane_in_quad / factor_in_partition;
access_strided_idx = lane_in_quad_quad / Layout::kFactor;
access_contiguous_idx =
((lane_in_pair * factor_in_partition + quad_quad) ^
access_strided_idx);
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kB) {
// Integer matrix multiply 16832 B
partition_contiguous_idx = lane_in_quad / factor_in_partition;
access_strided_idx = lane_in_quad_pair / Layout::kFactor + quad_quad * 2;
access_contiguous_idx =
((lane_in_pair * factor_in_partition + ((lane_id & 8) >> 3)) ^
access_strided_idx);
}
} else if (Layout::kFactor == 2) {
// Super Matrix multiply kBlock = 32
if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) {
// Matrix multiply 1688 A/B
// (Q stands for 1 8x128bit block).
// Q0
// Q1
// Q2
// Q3
// Four blocks are next to each other in the strided dimension.
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx = (lane_in_quad_pair / Layout::kFactor);
access_strided_idx = lane_id / Layout::kFactor;
} else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kA) {
// Matrix multiply 16816|1688.TF32 A
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
(quad_quad ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx = (lane_in_quad_quad / Layout::kFactor);
} else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kB) {
// Matrix multiply 16816|1688.TF32 B
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
((quad_pair & 1) ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx =
(lane_in_quad_pair + (lane_id >> 4 << 3)) / Layout::kFactor;
}
else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) {
// Matrix multiply 16832.SP B
// Q0 Q1 Q2 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
(quad_pair ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx = lane_in_quad_pair / Layout::kFactor;
}
} else if (Layout::kFactor == 1) {
// Super Matrix multiply kBlock = 64
if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) {
// Q0
// Q1
// Q2
// Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = lane_in_quad;
access_strided_idx = lane_id;
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kA) {
// Matrix multiply 16816|1688.TF32 A
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = (quad_quad ^ lane_in_quad);
access_strided_idx = lane_in_quad_quad;
} else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kB) {
// Matrix multiply 16816|1688.TF32 B
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = ((quad_pair & 1) ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3);
}
else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) {
// Matrix multiply 16832.SP B
// Q0 Q1 Q2 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = (quad_pair ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair;
}
}
int access_contiguous =
partition_contiguous_idx * Layout::PartitionShape::kContiguous +
access_contiguous_idx;
int access_strided = access_strided_idx;
byte_offset_ = (access_contiguous + access_strided * stride_) *
sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8;
}
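// Illustrative example of the initial byte offset computed above (the numbers
// are hypothetical). Assume a 16-bit element type with
// Layout::kElementsPerAccess == 8, so one AccessType spans 16 bytes, and
// suppose a lane resolves to access_contiguous == 3 and access_strided == 5
// with stride_ == 16 (in AccessType units):
//
//   byte_offset_ = (3 + 5 * 16) * (16 bits * 8 elements) / 8 bits-per-byte
//                = 83 * 16 bytes = 1328 bytes
//
// The point is that byte_offset_ is measured in bytes while stride_ and the
// access indices are measured in AccessType units.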
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof_bits<Element>::value / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile;
int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile;
byte_offset_ ^= k_groups_delta * sizeof_bits<Element>::value *
Layout::kElementsPerAccess *
Policy::LdsmShape::kContiguous / 8;
pointer_ +=
tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor +
whole_tiles * stride_ / sections_;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile;
int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile;
if (k_groups_delta < 0) {
whole_tiles -= 1;
k_groups_delta += Policy::kGroupsPerTile;
}
if ((Policy::kGroupsPerTile / kPartitionsK) >= 2) {
byte_offset_ ^= (k_groups_delta & 1) * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
if ((Policy::kGroupsPerTile / kPartitionsK) >= 4) {
byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 1)) & 2) *
Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
if ((Policy::kGroupsPerTile / kPartitionsK) == 8) {
byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 3)) & 4) *
Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
k_group_idx_ += k_groups_delta;
whole_tiles += k_group_idx_ / (Policy::kGroupsPerTile / kPartitionsK);
k_group_idx_ = k_group_idx_ % (Policy::kGroupsPerTile / kPartitionsK);
pointer_ +=
tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor +
whole_tiles * stride_ / sections_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
// Integer matrix multiply 16832 Interleaved-32
// NONE
// Integer matrix multiply 16816 Interleaved-32 || Integer matrix multiply 16816 kblock=32
// Integer matrix multiply 8816 Interleaved-32
// ^1 ^1
// Matrix multiply 1684.TF32 kblock=16 || Integer matrix multiply 16816 kblock=64
// Matrix multiply 1688 kblock=32 || Integer matrix multiply 8816 kblock=64
// ^1 ^3 ^1 ^3
// Matrix multiply 1688 kblock=64
// ^1 ^3 ^1 ^7 ^1 ^3 ^1 ^7
// Matrix multiply 16816 kblock=32 | 1688.TF32 kblock=16 || Integer matrix multiply 16832 kblock=64
// ^2 ^2
// Matrix multiply 16816 kblock=64 | 1688.TF32 kblock=32 || Integer matrix multiply 16832 kblock=128
// ^2 ^6 ^2 ^6
if ((Policy::kGroupsPerTile / kPartitionsK) > 1) {
int mask = ((Policy::kGroupsPerTile / kPartitionsK) == 8)
? 3
: (((Policy::kGroupsPerTile / kPartitionsK) == 4) ? 1 : 0);
if (((k_group_idx_ & mask) % 2) == 0)
byte_offset_ ^= 1 * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
else if ((k_group_idx_ & mask) == 1)
byte_offset_ ^= 3 * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
else if ((k_group_idx_ & mask) == 3)
byte_offset_ ^= 7 * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
k_group_idx_++;
if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) {
k_group_idx_ = 0;
add_tile_offset({Policy::kGroupsPerTile, 0});
}
return *this;
}
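  // Worked trace of the XOR advance above, assuming
  // Policy::kGroupsPerTile / kPartitionsK == 8 (so mask == 3):
  //
  //   k_group_idx_ : 0   1   2   3   4   5   6   7
  //   byte_offset_ : ^1  ^3  ^1  ^7  ^1  ^3  ^1  ^7
  //
  // where ^N denotes XOR-ing byte_offset_ with N * Policy::LdsmShape::kContiguous *
  // sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8. After the last step,
  // k_group_idx_ wraps to zero and add_tile_offset() advances to the next whole tile.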
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator &operator--() {
    assert(0);
    return *this;
  }
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
        // Each `c` step advances LdsmShape::kContiguous vectorized accesses along the
        // contiguous dimension; each `s` step advances
        // (kLdsmOpInner / Layout::kFactor) * LdsmShape::kStrided rows of the strided dimension.
        AccessType const *source_ptr =
            pointer_ + Policy::LdsmShape::kContiguous * c +
            Policy::kLdsmOpInner / Layout::kFactor *
                Policy::LdsmShape::kStrided * s * stride_;
char const *source_byte_ptr =
reinterpret_cast<char const *>(source_ptr) + byte_offset +
byte_offset_;
cutlass::arch::ldsm<layout::RowMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx], source_byte_ptr);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof_bits<AccessType>::value * pointer_offset / 8;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK);
}
};
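// Illustrative usage sketch for the crosswise iterator above. `WarpIteratorB`,
// `smem_ptr`, `smem_layout` and `kWarpGemmIterations` are assumed names for this
// sketch; the TensorRef must reference a shared-memory tile laid out with
// TensorOpMultiplicandCrosswise.
//
//   WarpIteratorB warp_iter({smem_ptr, smem_layout}, lane_id);
//   typename WarpIteratorB::Fragment frag;
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < kWarpGemmIterations; ++k) {
//     warp_iter.set_kgroup_index(k);  // allow the k-group index to fold into constants
//     warp_iter.load(frag);           // LDSM loads for one k-group
//     ++warp_iter;                    // XOR-advance byte_offset_ to the next k-group
//   }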
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Element number when the layout crosses (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator for ColumnMajor Crosswise may "
"only be instantiated for B operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kCrosswise>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
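// Usage note (illustrative): this ColumnMajor adapter forwards every operation to the
// pitch-linear iterator above with (row, column) mapped to (contiguous, strided), so a
// B-operand tile declared as MatrixShape<kK, kN> over
// layout::ColumnMajorTensorOpMultiplicandCrosswise<sizeof_bits<Element>::value, kCrosswise>
// is traversed exactly like PitchLinearShape<kK, kN> over TensorOpMultiplicandCrosswise.
// kK and kN are placeholder extents for this sketch, not symbols defined in this header.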
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Element number when the layout crosses (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA,
"MmaTensorOpMultiplicandIterator for RowMajor Crosswise may "
"only be instantiated for A operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// Element number when the layout crosses
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kCrosswise>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
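// Usage note (illustrative): the RowMajor adapter is the mirror image of the ColumnMajor
// one above. It maps (column, row) to (contiguous, strided), so an A-operand tile declared
// as MatrixShape<kM, kK> over layout::RowMajorTensorOpMultiplicandCrosswise is traversed as
// PitchLinearShape<kK, kM> over TensorOpMultiplicandCrosswise. kM and kK are placeholder
// extents for this sketch, not symbols defined in this header.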
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static bool const kDivisible =
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN);
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM,
(Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN
>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
    Index byte_offset) const {                   ///< stores a tile with a linear offset in units of bytes
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                        ///< fragment to store to the tensor
    TensorCoord const &tile_offset) const {      ///< stores a tile with a logical offset in units of whole tiles
    store(frag, tile_offset, 0);
  }
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
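// Illustrative sketch of the per-lane mapping used by the accumulator iterator above.
// With kElementsPerAccess = InstructionShape::kN / 4, the constructor places lane L at
// MatrixCoord(L >> 2, (L & 3) * kElementsPerAccess); e.g. for InstructionShape::kN == 8
// (kElementsPerAccess == 2), lane 13 starts at row 3, column 2 of each 8x8 accumulator
// tile. A minimal store path, where `warp_tile_ref` and `accumulators` are assumed names
// and Shape, Element, InstructionShape, OpDelta stand for concrete arguments:
//
//   MmaTensorOpAccumulatorTileIterator<Shape, Element, layout::RowMajor,
//                                      InstructionShape, OpDelta>
//       accum_iter(warp_tile_ref, lane_id);
//   accum_iter.store(accumulators);   // scatters this thread's fragment back to memory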
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout.
///
/// This iterator is not tested.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::AffineRankN<2>, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static bool const kDivisible =
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN);
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM,
(Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN
>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
    Index byte_offset) const {                   ///< stores a tile with a linear offset in units of bytes
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                        ///< fragment to store to the tensor
    TensorCoord const &tile_offset) const {      ///< stores a tile with a logical offset in units of whole tiles
    store(frag, tile_offset, 0);
  }
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<Shape_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static bool const kDivisible =
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN);
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM,
(Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN
>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element,
Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
frag[idx] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
    Index byte_offset) const {                   ///< stores a tile with a linear offset in units of bytes
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                        ///< fragment to store to the tensor
    TensorCoord const &tile_offset) const {      ///< stores a tile with a logical offset in units of whole tiles
    store(frag, tile_offset, 0);
  }
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
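// Note (illustrative): this ColumnMajor specialization mirrors the RowMajor one above;
// the same per-lane coordinates {accum_m, accum_n} are simply resolved through a
// column-major stride, i.e. offset = accum_m + accum_n * stride(0).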
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
    /// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_,
/// Interleaved N
int InterleavedN>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::ColumnMajorInterleaved<InterleavedN>,
InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorInterleaved<InterleavedN>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
};
private:
static int const kElementsPerAccess = 2;
public:
//
// Derived quantities
//
using AccessType = Array<Element, kElementsPerAccess>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kCount / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
AccessType* frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * InstructionShape::kM;
int accum_n = mma_n * InstructionShape::kN;
int idx = mma_m + mma_n * Policy::MmaIterations::kRow;
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_ref.offset(TensorCoord(accum_m, accum_n)));
frag_ptr[idx] = access_ptr[0];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * InstructionShape::kM;
int accum_n = mma_n * InstructionShape::kN;
int idx = mma_m + mma_n * Policy::MmaIterations::kRow;
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_ref.offset(TensorCoord(accum_m, accum_n)));
access_ptr[0] = frag_ptr[idx];
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
    Fragment const &frag,                        ///< fragment to store to the tensor
    Index byte_offset) const {                   ///< stores a tile with a linear offset in units of bytes
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                        ///< fragment to store to the tensor
    TensorCoord const &tile_offset) const {      ///< stores a tile with a logical offset in units of whole tiles
    store(frag, tile_offset, 0);
  }
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_,
/// Interleaved N
int InterleavedN>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::TensorNCxHWx<InterleavedN>,
InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = int8_t;
/// Layout of source tile
using Layout = cutlass::layout::TensorNCxHWx<InterleavedN>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of elements in strided dimension that each STG writes
static int const kStridedPerSTG = 8;
/// Factor to calculate reorder index to pack accumulator.
static int const kPackedFactor = Shape::kColumn / 32;
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / kStridedPerSTG,
Shape::kColumn / InterleavedN>;
};
private:
static int const kElementsPerAccess = InterleavedN / 4;
public:
//
// Derived quantities
//
struct alignas((kElementsPerAccess * sizeof_bits<Element>::value / 8)) AccessType {
Array<Element, kElementsPerAccess> storage;
};
/// Fragment object holding a thread's part of a tile
using Fragment = Array<int32_t, Shape::kCount / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
/// Row offset index globally
LongIndex global_offset_row_;
/// Column offset index globally
LongIndex global_offset_col_;
/// Output tensor size
TensorCoord extent_;
/// Alpha
float alpha_;
/// Beta
float beta_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int const lane_id,
TensorCoord extent,
float alpha = 1.0f,
float beta = 0.0f
):
ref_(ref),
extent_(extent),
alpha_(alpha),
beta_(beta) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
global_offset_row_ = quad;
global_offset_col_ = lane_in_quad * kElementsPerAccess;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(MatrixCoord const &tile_offset) {
global_offset_row_ += tile_offset.row() * Shape::kRow;
global_offset_col_ += tile_offset.column() * Shape::kColumn;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
AccessType* frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * InstructionShape::kM;
int accum_n = mma_n * InstructionShape::kN;
int idx = mma_m + mma_n * Policy::MmaIterations::kRow;
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
accum_m * offset_ref.stride(0) + accum_n);
frag_ptr[idx] = access_ptr[0];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
Array<float, Shape::kCount / kThreads> output_frag_f;
Array<Element, Shape::kCount / kThreads> output_frag;
LongIndex pq = extent_.h() * extent_.w();
LongIndex extent_row = extent_.n() * pq;
LongIndex extent_col = extent_.c();
LongIndex k_major = (global_offset_col_ / InterleavedN) * pq;
Index k_minor = global_offset_col_ % InterleavedN;
LongIndex k_offset = k_major * InterleavedN + k_minor;
LongIndex k_offset_delta = pq * InterleavedN;
LongIndex stride_n = pq * extent_.c();
Index n;
LongIndex pq_rem;
unsigned int pq_mul, pq_shr;
find_divisor(pq_mul, pq_shr, pq);
if(beta_ == 0.0f) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < int(frag.size()); ++i) {
output_frag_f[i] = frag[i];
}
if(InstructionShape::kM == Policy::kStridedPerSTG) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < int(frag.size()); ++i) {
output_frag[i] = (Element)(output_frag_f[i] * alpha_);
}
} else {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < int(frag.size()); ++i) {
int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor)
+ (i % (8 * Policy::kPackedFactor)) / 2 * 4
+ (i % (8 * Policy::kPackedFactor)) % 2
+ (i / (8 * Policy::kPackedFactor)) % 2 * 2;
output_frag[i] = (Element)(output_frag_f[map_i] * alpha_);
}
}
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&output_frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * Policy::kStridedPerSTG;
fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr);
LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN;
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int accum_n = mma_n * InterleavedN;
int idx = mma_n + mma_m * Policy::MmaIterations::kColumn;
if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) {
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_m + mma_n * k_offset_delta);
access_ptr[0] = frag_ptr[idx];
}
}
}
} else {
if(InstructionShape::kM == Policy::kStridedPerSTG) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < int(frag.size()); ++i) {
output_frag_f[i] = frag[i];
}
} else {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < int(frag.size()); ++i) {
int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor)
+ (i % (8 * Policy::kPackedFactor)) / 2 * 4
+ (i % (8 * Policy::kPackedFactor)) % 2
+ (i / (8 * Policy::kPackedFactor)) % 2 * 2;
output_frag_f[i] = frag[map_i];
}
}
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&output_frag);
Array<Element, kElementsPerAccess> ref_frag;
AccessType *ref_frag_ptr = reinterpret_cast<AccessType *>(&ref_frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * Policy::kStridedPerSTG;
fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr);
LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN;
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int accum_n = mma_n * InterleavedN;
int idx = mma_n + mma_m * Policy::MmaIterations::kColumn;
if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) {
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_m + mma_n * k_offset_delta);
ref_frag_ptr[0] = access_ptr[0];
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < kElementsPerAccess; ++i) {
output_frag[idx * kElementsPerAccess + i] = Element(alpha_ * output_frag_f[idx * kElementsPerAccess + i]
+ beta_ * ref_frag[i]);
}
access_ptr[0] = frag_ptr[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment &frag, ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h",
"repo_id": "include",
"token_count": 59772
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mixed-precision reduction
template <
typename ElementAccumulator_,
typename Element_,
int Count = 1
>
struct ReduceAdd {
//
// Type definitions
//
using ElementAccumulator = ElementAccumulator_;
using Element = Element_;
static int const kCount = Count;
using FragmentAccumulator = cutlass::Array<ElementAccumulator, kCount>;
using FragmentElement = cutlass::Array<Element, kCount>;
struct Params { };
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
ReduceAdd(Params params_ = Params()): params(params_) { }
/// Operator
CUTLASS_HOST_DEVICE
FragmentAccumulator operator()(
FragmentAccumulator accumulator,
FragmentElement element) const {
plus<FragmentAccumulator> op;
NumericArrayConverter<
ElementAccumulator,
Element,
kCount,
PreferredRoundingMode<ElementAccumulator, Element>::kRound> converter;
return op(accumulator, converter(element));
}
};
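//
// Illustrative usage sketch (comment only; the names below are hypothetical and not part of
// the library interface):
//
//   using Reduce = cutlass::reduction::thread::ReduceAdd<float, cutlass::half_t, 4>;
//
//   Reduce reduce_op;
//   Reduce::FragmentAccumulator accum;   // Array<float, 4>
//   Reduce::FragmentElement items;       // Array<half_t, 4>
//
//   accum.clear();
//   accum = reduce_op(accum, items);     // elementwise accum[i] = accum[i] + float(items[i])
//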
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Special handling for binary operators
template <typename ReductionOp, typename Element, int N>
struct VectorizeArrayOperation {
using ValueType = Array<Element, N>;
CUTLASS_HOST_DEVICE
ValueType operator()(
ReductionOp const &reduction_op,
ValueType const &lhs,
ValueType const &rhs) const {
ValueType result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = reduction_op(lhs[i], rhs[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ReductionOp, typename Element, int N>
struct ReduceArrayOperation {
using ArrayType = Array<Element, N>;
CUTLASS_HOST_DEVICE
Element operator()(
ReductionOp const &reduction_op,
ArrayType const &array) const {
Element item = reduction_op(array[0], array[1]);
CUTLASS_PRAGMA_UNROLL
for (int i = 2; i < N; ++i) {
item = reduction_op(item, array[i]);
}
return item;
}
};
template <int N>
struct ReduceArrayOperation<logical_and<uint1b_t>, uint1b_t, N> {
using ArrayType = Array<uint1b_t, N>;
CUTLASS_HOST_DEVICE
uint1b_t operator()(
logical_and<uint1b_t> const &reduction_op,
ArrayType const &array) const {
uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array);
bool item = false;
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < (N + 7) / 8; ++byte) {
uint8_t bits = ptr[byte];
item = (item || !bits);
}
return uint1b_t(!item);
}
};
template <int N>
struct ReduceArrayOperation<logical_or<uint1b_t>, uint1b_t, N> {
using ArrayType = Array<uint1b_t, N>;
CUTLASS_HOST_DEVICE
uint1b_t operator()(
logical_or<uint1b_t> const &reduction_op,
ArrayType const &array) const {
uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array);
bool item = false;
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < (N + 7) / 8; ++byte) {
uint8_t bits = ptr[byte];
item = (item || bits);
}
return uint1b_t(item);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to infer template argument types
template <typename ReductionOp, typename Element, int N>
CUTLASS_HOST_DEVICE
Array<Element, N> ApplyArrayOperator(
ReductionOp const &reduction_op,
Array<Element, N> const &lhs,
Array<Element, N> const &rhs) {
VectorizeArrayOperation<ReductionOp, Element, N> vectorize_op;
return vectorize_op(reduction_op, lhs, rhs);
}
/// Helper to reduce an array
template <typename ReductionOp, typename Element, int N>
CUTLASS_HOST_DEVICE
Element ReduceArray(ReductionOp const &reduction_op, Array<Element, N> const &array) {
ReduceArrayOperation<ReductionOp, Element, N> reduce_array_op;
return reduce_array_op(reduction_op, array);
}
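//
// Illustrative sketch (comment only; names are hypothetical): reducing an Array<float, 8>
// of partial sums to a single value with cutlass::plus<float>:
//
//   cutlass::plus<float> add_op;
//   cutlass::Array<float, 8> partials;   // filled elsewhere
//   float total = cutlass::reduction::thread::detail::ReduceArray(add_op, partials);
//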
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/thread/reduction_operators.h/0 | {
"file_path": "include/cutlass/reduction/thread/reduction_operators.h",
"repo_id": "include",
"token_count": 2057
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
namespace cutlass {
namespace transform {
namespace thread {
namespace UnaryTransform {
struct Identity; ///< None (i.e., identity)
struct Conjugate; ///< Complex conjugate
}
/// Element-wise unary operator that transforms one element of a fragment at a time
template<
typename FragmentIn, ///< Input Fragment
typename FragmentOut,///< Output Fragment
typename Transform> ///< Unary transform operator
class UnaryOp
{
public:
CUTLASS_DEVICE
static FragmentOut execute(FragmentIn &in)
{
static_assert(FragmentIn::kElements == FragmentOut::kElements, "Number of elements must match.");
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
FragmentOut out;
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
CUTLASS_PRAGMA_UNROLL
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = static_cast<typename FragmentOut::Element>(in[i]);
}
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = conj(static_cast<typename FragmentOut::Element>(in[i]));
}
}
return out;
}
};
template<typename FragmentIn, typename Transform>
class UnaryOp<FragmentIn, FragmentIn, Transform>
{
public:
CUTLASS_DEVICE
static FragmentIn execute(FragmentIn &in)
{
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
return in;
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for(int i=0; i < FragmentIn::kElements; ++i){
in[i] = conj(in[i]);
}
}
return in;
}
};
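//
// Illustrative usage sketch (comment only; the fragment type chosen here is hypothetical):
//
//   using Frag = cutlass::Array<cutlass::complex<float>, 8>;
//
//   Frag frag;   // filled elsewhere
//   Frag conjugated = UnaryOp<Frag, Frag, UnaryTransform::Conjugate>::execute(frag);
//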
}
}
}
| include/cutlass/transform/thread/unary_op.h/0 | {
"file_path": "include/cutlass/transform/thread/unary_op.h",
"repo_id": "include",
"token_count": 1663
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
/// In GEMM/Conv implementation, this is used to move in the k dimension in the shared memory.
/// Below layouts are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B.
/// For row major A operand, k dimension is contiguous dimension;
/// For col major A operand, k dimension is strided dimension;
/// For row major B operand, k dimension is strided dimension;
/// For col major B operand, k dimension is contiguous dimension.
/// Below two classes map col/row major to the pitch linear coordinates used
/// in this base class.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
};
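//
// Illustrative sketch (comment only; the iterator object name is hypothetical): a mainloop
// that advances this iterator by one whole tile along the strided (k) dimension would call
//
//   smem_iterator.add_tile_offset({0, 1});   // TensorCoord is {contiguous, strided}
//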
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h/0 | {
"file_path": "include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h",
"repo_id": "include",
"token_count": 4230
} | 46 |
[README](../../README.md#documentation) > **CUTLASS 3: Building with Clang as host compiler**
# Building with Clang as host compiler
CUTLASS 3.2(.1) reintroduces support for building with
Clang as host compiler, and NVCC as device compiler.
This is NOT the same as building with
Clang as both host and device compiler ("CUDA Clang").
# Software prerequisites
1. Clang (regularly tested with Clang 14;
occasionally tested with Clang 10 and greater)
2. CUDA Toolkit (tested with 12.2; other versions likely work)
3. CMake (at least 3.18)
4. git
5. Python (at least 3.6)
Experience with Ubuntu 22.04 LTS is that
clang requires the following packages to be installed.
```bash
$ sudo apt-get install clang cmake ninja-build pkg-config libgtk-3-dev liblzma-dev libstdc++-12-dev
```
A symptom of not installing all needed dependencies
is the following error when attempting to use clang:
`"/usr/bin/ld: cannot find -lstdc++: No such file or directory"`.
# Running CMake
## Required CMake options
The Clang build requires specifying the following CMake options.
Replace `<path-to-clang++>` with the path to your `clang++` executable.
You may use `clang++` directly if it is in your `PATH`.
* `CMAKE_CXX_COMPILER=<path-to-clang++>`
* `CMAKE_CUDA_HOST_COMPILER=<path-to-clang++>`
One must set both! It's not enough just to set the `CXX` environment
variable, for example. Symptoms of only setting `CMAKE_CXX_COMPILER`
(or only setting the `CXX` environment variable) include `cc1plus`
(GCC's compiler executable) reporting build errors due to it not
understanding Clang's command-line options.
Users can also specify a particular CUDA Toolkit version
by setting the CMake option `CMAKE_CUDA_COMPILER`
to the path to the `nvcc` executable
that lives in the CUDA Toolkit's directory. For example,
if `${PATH_TO_CUDA_TOOLKIT}` is the CUDA Toolkit directory,
then one can set `CMAKE_CUDA_COMPILER` as follows.
* `CMAKE_CUDA_COMPILER=${PATH_TO_CUDA_TOOLKIT}/bin/nvcc`
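For example, a complete configure command that combines these options
might look like the following (the build directory name and toolkit
path are illustrative).
```bash
$ cmake -S . -B build \
    -DCMAKE_CXX_COMPILER=clang++ \
    -DCMAKE_CUDA_HOST_COMPILER=clang++ \
    -DCMAKE_CUDA_COMPILER=${PATH_TO_CUDA_TOOLKIT}/bin/nvcc
```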
| media/docs/build/building_with_clang_as_host_compiler.md/0 | {
"file_path": "media/docs/build/building_with_clang_as_host_compiler.md",
"repo_id": "media",
"token_count": 654
} | 47 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS")
[README](../../README.md#documentation) > **Fundamental Types**
# Fundamental Types
CUTLASS defines several fundamental numeric and container classes upon which computations and algorithms for linear algebra are implemented.
Where possible, CUTLASS fundamental types mirror the C++ Standard Library. However, there are circumstances that necessitate divergence from the Standard Library's specification. In such cases, the CUTLASS implementation adopts distinct capitalization to signal that the corresponding standard vocabulary types may not be safely substituted in all cases.
Most types in CUTLASS are usable in both host code and device code. Moreover, they are functional regardless of compute capability, but they may only be efficient when hardware support is present.
## Numeric Types
CUTLASS defines classes for the following numeric data types.
* `half_t`: IEEE half-precision floating point (exponent: 5b, mantissa: 10b; literal suffix `_hf`)
* `bfloat16_t`: BFloat16 data type (exponent: 8b, mantissa: 7b; literal suffix `_bf16`)
* `tfloat32_t`: Tensor Float 32 data type (exponent: 8b, mantissa: 10b; literal suffix `_tf32`)
* `int4b_t`, `uint4b_t`: 4b signed and unsigned integer (literal suffix `_s4`, `_u4`)
* `bin1_t`: 1b binary numeric type (literal suffix `_b1`)
* `complex<T>`: defines complex-valued data type based on the supplied real-valued numeric type
Numeric types in CUTLASS may be used in both host and device code and are intended to function
like any other plain-old-data type.
If CUTLASS is compiled with `CUTLASS_F16C_ENABLED`, then hardware conversion is used for
half-precision types in host code. Regardless, `cutlass::half_t` uses the most efficient
NVIDIA GPU hardware instructions available in device code.
Example:
```c++
#include <iostream>
#include <cutlass/numeric_types.h>
__global__ void kernel(cutlass::half_t x) {
printf("Device: %f\n", float(x * 2.0_hf));
}
int main() {
cutlass::half_t x = 0.5_hf;
std::cin >> x;
std::cout << "Host: " << 2.0_hf * x << std::endl;
kernel<<< dim3(1,1), dim3(1,1,1) >>>(x);
return 0;
}
```
## Containers
CUTLASS uses the following containers extensively for implementing efficient CUDA kernels.
### Array
```c++
template <
typename T, // element type
int N // number of elements
>
struct Array;
```
`Array<class T, int N>` defines a statically sized array of elements of type _T_ and size _N_. This class is similar to
[`std::array<>`](https://en.cppreference.com/w/cpp/container/array) in the Standard Library with one notable exception:
partial specializations exist to pack or unpack elements smaller than one byte.
`Array<>` is intended to be a convenient and uniform container class to store arrays of numeric elements regardless of data type or vector length. The storage needed is expected to be the minimum necessary given the logical size of each numeric type in bits (numeric types smaller than one byte are densely packed). Nevertheless, the size reported by `sizeof(Array<T, N>)` is always an integer multiple of bytes.
Storing numeric elements in a C++ STL-style container class enables useful modern C++ mechanisms such as range-based for loops. For example, to print the elements of `Array<>`, the following range-based for loop syntax is always valid regardless of numeric data type, compute capability, or context in host or device code.
Example:
```c++
int const kN = 8;
Array<T, kN> elements;
CUTLASS_PRAGMA_UNROLL // required to ensure array remains in registers
for (auto x : elements) {
printf("%d, %f", int64_t(x), double(x)); // explictly convert to int64_t or double
}
```
When copying `Array<>` objects or passing them as arguments to methods, it is best to avoid accessing individual elements. This enables the use of vector instructions to perform the operation more efficiently. For example, setting all elements to zero is best performed by calling the `clear()` method. Copies should be performed by assigning the entire object.
Example:
```c++
#include <cutlass/array.h>
int const kN = 8;
Array<T, kN> source;
Array<T, kN> destination;
source.clear(); // set all elements to value of zero
destination = source; // copy to `destination`
```
`Array<>` may be used to store elements smaller than one byte such as 4b integers.
```c++
Array<int4b_t, 2> packed_integers;
static_assert(
sizeof(packed_integers) == 1,
"Packed storage of sub-byte data types is compact.");
// Access array elements using usual indirection and assignment operators
packed_integers[0] = 2_s4;
packed_integers[1] = 3_s4;
CUTLASS_PRAGMA_UNROLL
for (auto x : packed_integers) {
printf("%d", int(x)); // access elements normally
}
```
### AlignedArray
```c++
template <
typename T, // element type
int N, // number of elements
int Alignment // alignment requirement in bytes
>
class AlignedArray;
```
`AlignedArray` is derived from `Array<T, N>` and supports an optional alignment field. Pointers to objects of type `AlignedArray<>` reliably yield vectorized memory accesses when dereferenced.
Example:
```c++
int const kN = 8;
AlignedArray<half_t, kN> source;
AlignedArray<half_t, kN> const *ptr = ...;
source = *ptr; // 128b aligned memory access
```
### AlignedBuffer
```c++
template <
typename T, // element type
int N, // number of elements
int Alignment // alignment requirement in bytes
>
class AlignedBuffer;
```
`AlignedBuffer` provides a uniform way to define aligned memory allocations for all data types. This is particularly
useful in defining allocations within shared memory with guaranteed memory alignment needed for vectorized access.
Note, constructors of the elements within AlignedBuffer<> are not called, and so the elements are initially in an
undefined state.
Use `AlignedBuffer<>::data()` to obtain a pointer to the first element of the buffer.
**Example:** Guaranteed aligned shared memory allocation. Note, shared memory contents are uninitialized.
```c++
int const kN = 32;
int const kAlignment = 16; // alignment in bytes
// Define a shared memory allocation in device code
__shared__ AlignedBuffer<complex<half_t>, kN, kAlignment> matrix_tile;
complex<half_t> *ptr = matrix_tile.data(); // ptr is guaranteed to have 128b (16 Byte) alignment
```
Note, `AlignedBuffer<>` only guarantees that its internal memory allocation is aligned, obtained by `AlignedBuffer<>::data()`. There is no guarantee that the `AlignedBuffer<>` object itself satisfies alignment constraints or that its internal memory allocation is contiguous. Device code performing vectorized memory accesses should use the `AlignedArray<>` type.
**_Example_:** Vectorized memory access to shared memory allocations.
```c++
int const kN = 1024;
__shared__ AlignedBuffer<half_t, kN> smem_buffer;
AlignedArray<half_t, 8> *ptr = reinterpret_cast<AlignedArray<half_t, 8> *>(smem_buffer.data());
AlignedArray<half_t, 8> x = ptr[threadIdx.x]; // 128b shared memory load
```
### Numeric Conversion
CUTLASS defines procedures for performing numeric conversion between data types in `cutlass/numeric_conversion.h`.
Where possible, these target hardware acceleration on the target architecture and support multiple rounding modes.
```c++
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
NumericConverter<half_t, float> convert_f32_to_f16;
NumericConverter<tfloat32_t, float> convert_f32_to_tf32;
half_t x = convert_f32_to_f16(3.14159f);
tfloat32_t y = convert_f32_to_tf32(3.14159f);
```
Recent GPU architectures such as NVIDIA Turing and Ampere combine numeric conversion with efficient packing
into bit vectors. Consequently, CUTLASS defines conversion on both scalars and `Array<>` objects to implement
the optimal code sequence on all architectures.
```c++
//
// Example: convert and pack 32b signed integers to a vector of packed signed 8-bit integers.
//
int const kN = 16;
Array<int8_t, kN> destination;
Array<int, kN> source;
NumericArrayConverter<int8_t, int, kN> convert;
destination = convert(source);
```
### Coord
```c++
template <
int Rank,
typename Index = int
>
class Coord;
```
`Coord<Rank, class T = int>` is a container used explicitly for defining logical coordinates in tensors of known rank. Traditional vector operators are defined such as `+`, `-`, and scalar multiplication `*` to simplify the creation of vector-valued expressions on tensor coordinates.
**Example:** Vector operations on coordinates.
```c++
Coord<2> compute_offset(Coord<2> const & base) {
Coord<2> stride = make_Coord(1, kM);
return base + stride * make_Coord(threadIdx.x, threadIdx.y);
}
```
Instances of `Coord<>` are used throughout CUTLASS to compute indices into tensors. Frequently, the dimensions of tensors of known layouts may be given names such as "rows" or "columns". To clarify the code, we have implemented several classes derived from `Coord<>` with accessors for each coordinate member.
Such classes include:
```c++
struct MatrixCoord : public Coord<2> {
Index & row();
Index & column();
};
```
and
```c++
struct Tensor4DCoord : public Coord<4> {
Index & n();
Index & h();
Index & w();
Index & c();
};
```
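For example, one might use these accessors as follows (a brief sketch; the values are arbitrary):
```c++
Tensor4DCoord activation_extent(16, 14, 14, 64);  // (n, h, w, c)
activation_extent.c() += 64;                      // accessors return mutable references

MatrixCoord extent(128, 256);
int rows = extent.row();          // 128
int columns = extent.column();    // 256
```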
### PredicateVector<int Bits>
`PredicateVector<int Bits>` contains a statically sized array of hardware predicates packed into registers to enable efficient access within unrolled loops.
This container is optimized for sequential access through iterators, though these are only efficient when used within fully unrolled loops.
Moreover, instances of `PredicateVector<>` are not guaranteed to be updated until any non-const iterator objects have gone out of scope. This is because iterators are effectively caches that update the `PredicateVector<>` instance's internal storage as a batch.
**Example:** Managing an array of predicates.
```c++
unsigned mask;
PredicateVector<kBits> predicates;
// Nested scope to update predicates via an iterator
{
auto pred_it = predicates.begin();
CUTLASS_PRAGMA_UNROLL
for (int bit = 0; bit < kBits; ++bit, ++pred_it) {
bool guard = (mask & (1u << bit));
pred_it.set(guard);
}
}
// Efficient use of predicates to guard memory instructions
T *ptr;
Array<T, kAccesses> fragment;
auto pred_it = predicates.const_begin();
for (int access = 0; access < kAccesses; ++access, ++pred_it) {
if (*pred_it) {
fragment[access] = ptr[access];
}
}
```
Note: `PredicateVector<>` is not efficient when accessed via dynamic random access. If an array of bits is needed with dynamic random access (in contrast with access via _constexpr_ indices), then `Array<bin1_t, N>` should be used instead.
## Functional
CUTLASS defines function objects corresponding to basic arithmetic operations modeled after C++ Standard Library's `<functional>` header.
CUTLASS extends this by defining `multiply_add<T>` which computes `d = a * b + c`. The partial specialization `multiply_add<complex<T>>` computes complex-valued multiplication and addition using four real-valued multiply-add operations; these may correspond to native hardware instructions.
Example:
```c++
complex<float> a;
complex<float> b;
complex<float> c;
complex<float> d;
multiply_add<complex<float>> mad_op;
d = mad_op(a, b, c); // four single-precision multiply-add instructions
```
CUTLASS defines partial specializations for type `Array<T, N>`, performing elementwise operations on each element. A further partial specialization for `Array<half_t, N>` may target native SIMD instructions for compute capability SM60 and beyond.
**Example:** Fused multiply-add of arrays of half-precision elements.
```c++
static int const kN = 8;
Array<half_t, kN> a;
Array<half_t, kN> b;
Array<half_t, kN> c;
Array<half_t, kN> d;
multiply_add<Array<half_t, kN>> mad_op;
d = mad_op(a, b, c); // efficient multiply-add for Array of half-precision elements
```
## Numeric Conversion
Operators are defined to convert between numeric types in `numeric_conversion.h`. Conversion operators are defined in
terms of individual numeric elements and on arrays which enable the possibility of efficient hardware
support on current and future NVIDIA GPUs.
**Example:** Converting between 32-b and 8-b integers. This is a minimal sketch using `NumericArrayConverter`, which converts and packs an entire `Array<>` at once.
```c++
int const kN = 16;
Array<int, kN>    source;
Array<int8_t, kN> destination;
NumericArrayConverter<int8_t, int, kN> convert_s32_to_s8;
destination = convert_s32_to_s8(source);
```
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/fundamental_types.md/0 | {
"file_path": "media/docs/fundamental_types.md",
"repo_id": "media",
"token_count": 4214
} | 48 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import logging
import os
import sys
import cutlass_library
def _cuda_install_path_from_nvcc() -> str:
import subprocess
# Attempt to detect CUDA_INSTALL_PATH based on location of NVCC
result = subprocess.run(['/usr/bin/which', 'nvcc'], capture_output=True)
if result.returncode != 0:
raise Exception(f'Unable to find nvcc via `which` utility.')
cuda_install_path = result.stdout.decode('utf-8').split('/bin/nvcc')[0]
if not os.path.isdir(cuda_install_path):
raise Exception(f'Environment variable "CUDA_INSTALL_PATH" is not defined, '
f'and default path of {cuda_install_path} does not exist.')
return cuda_install_path
CUTLASS_PATH = os.getenv("CUTLASS_PATH", cutlass_library.source_path)
# Alias CUTLASS_PATH as source_path
source_path = CUTLASS_PATH
_CUDA_INSTALL_PATH = None
def cuda_install_path():
"""
Helper method for on-demand fetching of the CUDA installation path. This allows
the import of CUTLASS to proceed even if NVCC is not available, preferring to
raise this error only when an operation that needs NVCC is being performed.
"""
global _CUDA_INSTALL_PATH
if _CUDA_INSTALL_PATH is None:
_CUDA_INSTALL_PATH = os.getenv("CUDA_INSTALL_PATH", _cuda_install_path_from_nvcc())
return _CUDA_INSTALL_PATH
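# Example (illustrative): the toolkit location can also be pinned explicitly by exporting
# CUDA_INSTALL_PATH before importing this package, e.g.
#
#   CUDA_INSTALL_PATH=/usr/local/cuda-12.2 python my_script.py
#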
CACHE_FILE = "compiled_cache.db"
from cutlass_library import (
DataType,
EpilogueScheduleType,
KernelScheduleType,
MathOperation,
LayoutType,
OpcodeClass,
TileDescription,
TileSchedulerType,
)
this = sys.modules[__name__]
this.logger = logging.getLogger(__name__)
# RMM is only supported for Python 3.9+
if (sys.version_info.major == 3 and sys.version_info.minor > 8) or sys.version_info.major > 3:
try:
import rmm
this.use_rmm = True
except ImportError:
this.use_rmm = False
else:
this.use_rmm = False
def set_log_level(level: int):
"""
Sets the log level
:param log_level: severity of logging level to use. See https://docs.python.org/3/library/logging.html#logging-levels for options
:type log_level: int
"""
this.logger.setLevel(level)
set_log_level(logging.ERROR)
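# Example (illustrative): user code can raise the verbosity after importing CUTLASS, e.g.
#
#   import logging
#   import cutlass
#   cutlass.set_log_level(logging.DEBUG)
#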
from cutlass.library_defaults import OptionRegistry
from cutlass.backend.utils.device import device_cc
this._option_registry = None
def get_option_registry():
"""
Helper method for on-demand initialization of the options registry. This avoids building
the registry when CUTLASS is imported.
"""
if this._option_registry is None:
this.logger.info("Initializing option registry")
this._option_registry = OptionRegistry(device_cc())
return this._option_registry
this.__version__ = '3.5.0'
from cutlass.backend import create_memory_pool
from cutlass.emit.pytorch import pytorch
from cutlass.op.gemm import Gemm
from cutlass.op.conv import Conv2d, Conv2dFprop, Conv2dDgrad, Conv2dWgrad
from cutlass.op.gemm_grouped import GroupedGemm
from cutlass.op.op import OperationBase
from cutlass.backend.evt.ir.tensor import Tensor
this.memory_pool = None
def get_memory_pool():
""""
Helper method for on-demand memory pool. This avoids allocating the memory pool unnecessarily
whe CUTLASS is imported.
"""
if this.use_rmm and this.memory_pool is None:
this.memory_pool = create_memory_pool(init_pool_size=2 ** 30, max_pool_size=2 ** 32)
return this.memory_pool
from cuda import cuda, cudart
this._device_id = None
def initialize_cuda_context():
if this._device_id is not None:
return
if this.use_rmm:
# This also covers initializing the CUDA context
get_memory_pool()
device_id = os.getenv("CUTLASS_CUDA_DEVICE_ID")
if device_id is None:
if not this.use_rmm:
# Manually call cuInit() and create context by making a runtime API call
err, = cudart.cudaFree(0)
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(f"cudaFree failed with error {err}")
err, device_count = cuda.cuDeviceGetCount()
if err != cuda.CUresult.CUDA_SUCCESS:
raise Exception(f"cuDeviceGetCount failed with error {err}")
if device_count <= 0:
raise Exception("No CUDA devices found")
device_id = 0
this._device_id = int(device_id)
def device_id() -> int:
initialize_cuda_context()
return this._device_id
| python/cutlass/__init__.py/0 | {
"file_path": "python/cutlass/__init__.py",
"repo_id": "python",
"token_count": 2173
} | 49 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base class for Python EVT Frontend
"""
from typing import Union
from cutlass_library import DataType
from cutlass.backend.evt.ir import (
ComputeNode,
DAGIR,
LayoutNode,
LoadNode,
StoreNode,
)
from cutlass.backend.evt.passes import (
EVTGraphDrawer,
EVTPassManager,
GetSmemSize,
PassDAG2Tree,
PassGetArgumentType,
PassGetImpl,
PassFixElementD,
PassLayoutManipulateElimination,
PassPreprocessRed,
PassShapeTypePropagation,
)
from cutlass.backend.utils import device_cc
from cutlass.epilogue.evt_ops import permute, reshape
from cutlass.utils.datatypes import library_type
class EVTFrontendBase:
layout_fns = {
"permute": permute,
"reshape": reshape
}
def __init__(self, element_compute=DataType.f32, cc=None, additional_passes=[], **kwargs) -> None:
self.cc = cc if cc else device_cc()
self.element_compute = library_type(element_compute)
self.dag_ir = DAGIR(self.element_compute, self.cc)
self.compute_cnt = 0
self.layout_cnt = 0
self.pass_manager = EVTPassManager(
self.dag_ir,
[
PassPreprocessRed,
PassGetArgumentType,
PassShapeTypePropagation,
PassLayoutManipulateElimination,
PassGetImpl,
PassDAG2Tree,
PassFixElementD
] + additional_passes)
if self.cc == 80:
self._epilogue_stages = 1
else:
self._epilogue_stages = None
@property
def epilogue_stages(self):
return self._epilogue_stages
@epilogue_stages.setter
def epilogue_stages(self, stages):
self._epilogue_stages = stages
def parse(self, *args, **kwargs):
        raise NotImplementedError("The 'parse' method must be overridden by the concrete frontend class.")
def trace(self, *args, **kwargs):
# Parse the input
self.parse(*args, **kwargs)
# Run the passes
self.pass_manager()
# Set the epilogue type
self.epilogue_thread_type = self.dag_ir.epilogue_thread_type
if self.cc == 90:
self.arg_c_type = self.dag_ir.arg_c_type
self.arg_d_type = self.dag_ir.arg_d_type
self.reduction_names = self.dag_ir.reduction_names
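    # A minimal sketch of how a concrete frontend is expected to drive trace(). The subclass
    # name and example inputs below are hypothetical and only illustrate the intended call order:
    #
    #   class MyFrontend(EVTFrontendBase):
    #       def parse(self, example_inputs):
    #           # populate the DAG IR via add_load_node / add_compute_node / add_store_node / add_edge
    #           ...
    #
    #   frontend = MyFrontend(element_compute=DataType.f32)
    #   frontend.trace(example_inputs)   # parse, run the pass manager, and extract epilogue types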
#
# Helper functions for DAG IR manipulation
#
def add_node(self, node):
self.dag_ir.add_node(node)
def add_edge(self, src, tgt, weight=0):
self.dag_ir.add_edge(src, tgt, weight=weight)
def set_tensor(self, node_name, example):
"""
Add an example tensor to node {node_name} in the DAG IR
"""
meta = self.dag_ir.get_node_meta(node_name)
meta.tensor = {"tensor": example}
def set_store_tensor(self, node_name, example):
"""
Add an example tensor to node {node_name} in the DAG IR
"""
meta = self.dag_ir.get_node_meta(node_name)
meta.store_tensor = {"tensor": example}
def mark_output(self, node_name):
"""
Mark a store node as output
"""
meta = self.dag_ir.get_node_meta(node_name)
if not isinstance(meta, StoreNode):
raise ValueError(
f"Only StoreNodes can be marked as output. "
f"Got {type(meta).__name__}: {node_name}")
meta.is_output = True
# Add node with specific type
def add_load_node(self, name, example):
"""
Add a Load node to DAG IR
:param name: name of the loaded variable
:type name: str
:param example: example input
:type example: np.ndarray|torch.Tensor|cupy.ndarray|float
"""
if name is None:
raise ValueError(f"Name is not provided.")
if example is None:
raise ValueError(f"Example input for {name} is not provided.")
load_node = LoadNode(name)
load_node.tensor = {"tensor": example}
        # Special logic for the accumulator
if name == "accum":
if load_node.tensor.rank == 2:
new_shape = tuple([1, ] + list(load_node.tensor.shape))
load_node.tensor.broadcast(new_shape)
elif load_node.tensor.rank < 2 or load_node.tensor.rank > 3:
raise ValueError(f"Expect example inputs for 'accum' be a rank-2 or rank-3 tensor. Got {load_node.tensor.shape}.")
self.add_node(load_node)
def add_imm(self, value: Union[float,int]):
"""
Add an immediate scalar value to DAG IR
:param value: the value of the immediate scalar
:type value: float
"""
        try:
            value = float(value)
        except (TypeError, ValueError):
            raise ValueError(f"{type(value).__name__} cannot be converted to float.")
name = f"imm_{value}".replace('.', '_')
load_node = LoadNode(name)
load_node.tensor = {"tensor": value, "is_constant": True}
self.add_node(load_node)
return name
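    # For example (illustrative): add_imm(0.5) creates a constant LoadNode named "imm_0_5",
    # which can then be wired into a compute node with add_edge("imm_0_5", compute_name).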
def add_compute_node(self, op, name=None):
"""
Add a compute node.
:param op: the computation op
:param name: the node name (optional)
:type name: str
:return: the name of the compute node
"""
if name is None:
name = f"compute_{self.compute_cnt}"
self.compute_cnt += 1
compute_node = ComputeNode(
name=name, fn=op,
element_output=self.element_compute,
element_compute=self.element_compute)
self.add_node(compute_node)
return compute_node.name
def add_layout_node(self, op, kwargs, name=None):
"""
Add a layout node.
:param op: the layout op
:type op: evt_ops
:param name: the node name (optional)
:type name: str
:return: the name of the layout node
"""
if name is None:
name = f"layout_{self.layout_cnt}"
self.layout_cnt += 1
layout_node = LayoutNode(name=name, fn=op, kwargs=kwargs)
self.add_node(layout_node)
return layout_node.name
def add_store_node(self, name):
store_node = StoreNode(name)
self.add_node(store_node)
#
    # Visualization of the DAG IR
#
def visualize(self, name="dag_ir"):
"""
        Visualize the DAG IR and write it out as an SVG file
:param name: the name of the graph
"""
drawer = EVTGraphDrawer(self.dag_ir, name)
try:
for name, graph in drawer.get_dot_graph():
graph.write_svg(f"./{name}.svg")
        except Exception:
raise RuntimeError(
"'dot' is not found in path. GraphDrawer is disabled. "
"Please install it with 'sudo apt-get install graphviz'."
)
#
# Get shared memory size
#
def get_smem_size(self, tile_description):
"""
Get the shared memory size of the epilogue
"""
smem_size = GetSmemSize(self.dag_ir)(tile_description)
return smem_size
| python/cutlass/backend/evt/frontend/frontend_base.py/0 | {
"file_path": "python/cutlass/backend/evt/frontend/frontend_base.py",
"repo_id": "python",
"token_count": 3787
} | 50 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for interacting with the device
"""
from cuda import cuda, cudart
import cutlass
from cutlass.utils.datatypes import is_cupy_tensor, is_numpy_tensor, is_torch_tensor
def check_cuda_errors(result: list):
"""
    Checks whether `result` contains a CUDA error and, if so, raises the error as an exception.
    Otherwise, returns the results contained in the remaining fields of `result`.
:param result: the results of the `cudart` method, consisting of an error code and any method results
:type result: list
:return: non-error-code results from the `results` parameter
"""
# `result` is of the format : (cudaError_t, result...)
err = result[0]
if err.value:
raise RuntimeError("CUDA error: {}".format(cudart.cudaGetErrorName(err)))
if len(result) == 1:
return None
elif len(result) == 2:
return result[1]
else:
return result[1:]
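# Illustrative examples of the unpacking behavior (not executed here):
#
#   num_devices = check_cuda_errors(cudart.cudaGetDeviceCount())  # a single result is returned directly
#   check_cuda_errors(cudart.cudaSetDevice(0))                    # error-only results return None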
def device_cc(device: int = -1) -> int:
"""
Returns the compute capability of the device with ID `device`.
:param device: ID of the device to query
:type device: int
:return: compute capability of the queried device (e.g., 80 for SM80)
:rtype: int
"""
if device == -1:
device = cutlass.device_id()
deviceProp = check_cuda_errors(cudart.cudaGetDeviceProperties(device))
major = str(deviceProp.major)
minor = str(deviceProp.minor)
return int(major + minor)
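# For example, an A100 (SM80) reports 80 and an H100 (SM90) reports 90.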
def device_sm_count(device: int = -1):
if device == -1:
device = cutlass.device_id()
err, device_sm_count = cuda.cuDeviceGetAttribute(
cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device
)
if err != cuda.CUresult.CUDA_SUCCESS:
        raise Exception(
            "Failed to retrieve SM count. "
f"cuDeviceGetAttribute() failed with error: {cuda.cuGetErrorString(err)[1]}"
)
return device_sm_count
def to_device_ptr(tensor) -> cuda.CUdeviceptr:
"""
Converts a tensor to a CUdeviceptr
:param tensor: tensor to convert
:type tensor: np.ndarray | torch.Tensor | cp.ndarray | int
:return: device pointer
:rtype: cuda.CUdeviceptr
"""
if is_numpy_tensor(tensor):
ptr = cuda.CUdeviceptr(tensor.__array_interface__["data"][0])
elif is_torch_tensor(tensor):
ptr = cuda.CUdeviceptr(tensor.data_ptr())
elif is_cupy_tensor(tensor):
ptr = cuda.CUdeviceptr(int(tensor.data.ptr))
elif isinstance(tensor, cuda.CUdeviceptr):
ptr = tensor
elif isinstance(tensor, int):
ptr = cuda.CUdeviceptr(tensor)
else:
raise NotImplementedError(tensor)
return ptr
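# A small illustrative use, assuming PyTorch is available (it is not imported by this module):
#
#   import torch
#   x = torch.empty(128, device="cuda", dtype=torch.float16)
#   ptr = to_device_ptr(x)           # cuda.CUdeviceptr wrapping x.data_ptr()
#   ptr = to_device_ptr(int(ptr))    # plain integer addresses are wrapped as well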
| python/cutlass/backend/utils/device.py/0 | {
"file_path": "python/cutlass/backend/utils/device.py",
"repo_id": "python",
"token_count": 1519
} | 51 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for checking constraints on kernels and calculating kernel attributes
"""
import ctypes
from cutlass_library import DataTypeSize, OperationKind, SharedMemPerCC
import cutlass
from cutlass.backend.library import TileDescription
def calculate_smem_usage_per_stage(td: TileDescription, operation_kind: OperationKind) -> int:
"""
Returns the amount of shared memory in bytes consumed in a single stage of a kernel.
:param td: tile description to compute shared memory of
:type td: TileDescription
:param operation_kind: identifier for the type of operation being performed
:type operation_kind: cutlass_library.OperationKind
:return: number of bytes of shared memory consumed by a single stage
:rtype: int
"""
m, n, k = td.threadblock_shape
if operation_kind == OperationKind.Gemm:
stage_barrier_bytes = 32
return (
(DataTypeSize[td.math_instruction.element_a] * m * k // 8)
+ (DataTypeSize[td.math_instruction.element_b] * k * n // 8)
+ stage_barrier_bytes
)
else:
        raise Exception(f"No available shared memory calculation for operation kind {operation_kind}")
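# Worked example (illustrative): for a 128x128x32 threadblock tile with fp16 A and B operands,
# a single stage uses (16 * 128 * 32) // 8 + (16 * 32 * 128) // 8 + 32 = 8192 + 8192 + 32 = 16416 bytes.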
def calculate_smem_usage(operation) -> int:
"""
Returns the amount of shared memory in bytes consumed by a kernel.
    :param operation: operation for which the shared memory usage is computed
    :return: number of bytes of shared memory consumed by the operation
    :rtype: int
"""
_per_stage = calculate_smem_usage_per_stage(operation.tile_description, operation.operation_kind)
return _per_stage * operation.tile_description.stages
def valid_stage_count(
cc: int,
kernel_cc: int,
td: TileDescription,
element_C: cutlass.DataType = None,
element_D: cutlass.DataType = None,
verbose: bool = True) -> tuple:
"""
Checks whether a device with `cc` supports the number of stages within `tile_description`, both
based on raw limits on the number of stages and based on shared memory capacity
:param cc: compute capability of device in question
:type cc: int
:param kernel_cc: compute capability that the kernel targets (corresponding to the arch::SMxy tag in CUTLASS)
:type kernel_cc: int
:param td: tile description to check
:type td: TileDescription
:param element_C: data type of operand C
:type element_C: cutlass.DataType
:param element_D: data type of operand D
:type element_D: cutlass.DataType
:param verbose: whether to log warnings
:type verbose: bool
:return: tuple with the first element indicating whether the provided tile description is
valid for the provided device and the second element being an error message
:rtype: tuple
"""
if kernel_cc == 90:
if (td.stages is None or td.stages == 0):
# Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically
# determines the stage count to use. Thus, all settings are valid in these scenarios.
return (True, "")
elif verbose:
cutlass.logger.warning(
"Setting an explicit stage count for SM90 kernels currently may "
"result in compilation errors if the combination of tile shape, "
"stage count, and shared memory requirement of the epilogue exceeds "
"the available shared memory per SM.")
if td.stages <= 0:
return (False, f"Stage counts must be positive integers. Tile description has stage count of {td.stages}.")
if cc < 80 and td.stages != 2:
return (False, f"Tile description has stage count of {td.stages}, "
f"but only 2 stages are supported on SM{cc}.")
# The calculation below does not consider shared memory used by the epilogue and, thus,
# only catches cases in which the mainloop exceeds the device's shared memory capacity.
# This is not a concern for CUTLASS 2.x kernels, for which the shared memory of the
# mainloop and epilogue is shared.
smem_per_stage = calculate_smem_usage_per_stage(td, OperationKind.Gemm)
smem_usage_mainloop = (smem_per_stage * td.stages)
smem_arch = SharedMemPerCC[cc] << 10
if smem_usage_mainloop > smem_arch:
        return (False,
            "Configuration uses too much shared memory. Consider reducing stage count or tile shape.\n"
            f"Details:\n"
            f"Mainloop uses {smem_per_stage} bytes of shared memory per stage, and "
            f"{td.stages} stages for a total of {smem_usage_mainloop} bytes.\n"
            f"The maximum amount of shared memory that can be used per block on CC {cc} is {smem_arch}.")
return (True, "")
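# Worked example (illustrative): a 128x128x32 fp16 tile uses 16416 bytes per stage, so td.stages = 3
# requires 49248 bytes for the mainloop, well under the roughly 163 KB (166912-byte) per-block
# capacity assumed here for cc 80, whereas very large tiles or deep pipelines can exceed it.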
def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple:
"""
Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`.
:param cc: compute capability of device in question
:type cc: int
:param cluster_shape: dimensions of thread block cluster shape to check
:type cluster_shape: list
:return: tuple with the first element indicating whether the provided cluster shape is
valid for the provided device and the second element being an error message
:rtype: tuple
"""
if cc < 90:
if cluster_shape != [1, 1, 1]:
return (False,
f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. Received cluster shape of "
f"{cluster_shape} for SM{cc}.")
else:
return (True, "")
if len(cluster_shape) != 3:
        return (False,
            f"Cluster shapes must be rank-3. Received {cluster_shape} (rank {len(cluster_shape)}).")
if cluster_shape[2] != 1:
return (False,
"CUTLASS kernels currently require the third dimension of cluster shape to be 1. "
f"Received cluster shape of {cluster_shape}.")
# The CUDA programming guide currently defines a maximum of 8 thread blocks per cluster
# as being portably supported (https://docs.nvidia.com/cuda/cuda-c-programming-guide/#thread-block-clusters).
# Current CUTLASS kernels only have non-unit cluster dimensions within the first two dimensions,
# so we check that the first two dimensions of the cluster shape do not exceed 8 thread blocks in total.
blocks_in_2d = cluster_shape[0] * cluster_shape[1]
if blocks_in_2d > 8:
return (False,
f"Thread block clusters with more than 8 thread blocks are currently unsupported on SM{cc}. "
f"Received cluster shape {cluster_shape}, which has {blocks_in_2d} thread blocks.")
return (True, "")
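# For example (illustrative): valid_cluster_shape(90, [2, 2, 1]) returns (True, ""), while
# valid_cluster_shape(90, [4, 4, 1]) is rejected because it spans 16 thread blocks.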
def valid_schedule(
cc: int,
kernel_schedule: cutlass.KernelScheduleType,
epilogue_schedule: cutlass.EpilogueScheduleType,
tile_scheduler: cutlass.TileSchedulerType) -> tuple:
"""
Checks that the kernel and epilogue schedules passed in are a valid combination for
a device of compute capability ``cc``.
:param cc: compute capability of device in question
:type cc: int
:param kernel_schedule: kernel schedule type
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue schedule type
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param tile_scheduler: tile scheduler type
:type tile_scheduler: cutlass.TileSchedulerType
:return: tuple with the first element indicating whether the provided schedules are
valid for the provided device and the second element being an error message
:rtype: tuple
"""
kernel_auto = (kernel_schedule == cutlass.KernelScheduleType.ScheduleAuto)
epilogue_auto = (epilogue_schedule == cutlass.EpilogueScheduleType.ScheduleAuto)
tile_scheduler_default = (tile_scheduler == cutlass.TileSchedulerType.Default)
if cc < 90 and not (kernel_auto and epilogue_auto and tile_scheduler_default):
return (False, "Non-default schedules are only supported on SM90 and beyond")
if (kernel_auto and not epilogue_auto) or (not kernel_auto and epilogue_auto):
return (False, "Kernel and epilogue schedules must either both be auto or neither be auto")
if not tile_scheduler_default:
cooperative_kernels = [cutlass.KernelScheduleType.TmaWarpSpecializedCooperative,
cutlass.KernelScheduleType.CpAsyncWarpSpecializedCooperative]
if (tile_scheduler == cutlass.TileSchedulerType.StreamK) and (kernel_schedule not in cooperative_kernels):
return (False, "Stream-K tile scheduler is currently only supported with the cooperative kernel schedule")
return (True, "")
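# For example (illustrative): on SM90, KernelScheduleType.TmaWarpSpecializedCooperative with a
# non-auto epilogue schedule and TileSchedulerType.StreamK passes these checks, while requesting
# StreamK with a non-cooperative kernel schedule is rejected.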
def alignment_or_default(alignment_provided: int, default_alignment: int) -> int:
"""
    Returns `alignment_provided` if it is set, otherwise `default_alignment`, checking that
    `alignment_provided` does not exceed `default_alignment`.
:param alignment_provided: alignment preference specified. Can be None.
:type alignment_provided: int
:param default_alignment: alignment to use if `alignment_provided` is None
:type default_alignment: int
:return: alignment to use
:rtype: int
"""
if alignment_provided is not None:
if alignment_provided > default_alignment:
raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
return alignment_provided
return default_alignment
def update_alignment(alignment_provided:int, default_alignment: int) -> int:
"""
    Returns `alignment_provided` if it is set and does not exceed `default_alignment`. If it exceeds
    `default_alignment` but is a multiple of it, `default_alignment` is returned; otherwise an
    exception is raised. If `alignment_provided` is None, `default_alignment` is returned.
:param alignment_provided: alignment preference specified. Can be None.
:type alignment_provided: int
:param default_alignment: alignment to use if `alignment_provided` is None
:type default_alignment: int
:return: alignment to use
:rtype: int
"""
if alignment_provided is not None:
if alignment_provided > default_alignment:
if alignment_provided % default_alignment == 0:
return default_alignment
raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
return alignment_provided
return default_alignment
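# For example (illustrative): update_alignment(16, 8) returns 8 because 16 is a multiple of 8,
# update_alignment(4, 8) returns 4, and update_alignment(None, 8) falls back to the default of 8.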
| python/cutlass/utils/check.py/0 | {
"file_path": "python/cutlass/utils/check.py",
"repo_id": "python",
"token_count": 4035
} | 52 |
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
VERSION: '3.1.0',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
FILE_SUFFIX: '.html',
LINK_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt',
NAVIGATION_WITH_KEYS: false,
SHOW_SEARCH_SUMMARY: true,
ENABLE_SEARCH_SHORTCUTS: true,
}; | python/docs/_static/documentation_options.js/0 | {
"file_path": "python/docs/_static/documentation_options.js",
"repo_id": "python",
"token_count": 196
} | 53 |
/*
* searchtools.js
* ~~~~~~~~~~~~~~~~
*
* Sphinx JavaScript utilities for the full-text search.
*
* :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
/**
* Simple result scoring code.
*/
if (typeof Scorer === "undefined") {
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [docname, title, anchor, descr, score, filename]
// and returns the new score.
/*
score: result => {
const [docname, title, anchor, descr, score, filename] = result
return score
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {
0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5, // used to be unimportantResults
},
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2,
};
}
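// Illustrative example (not part of the original Sphinx code): an object whose last dotted name
// segment matches the query exactly (objNameMatch = 11) and that carries priority 1 (objPrio[1] = 5)
// starts with a score of 16 before any title or term bonuses are added.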
const _removeChildren = (element) => {
while (element && element.lastChild) element.removeChild(element.lastChild);
};
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
*/
const _escapeRegExp = (string) =>
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
const _displayItem = (item, searchTerms) => {
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
const [docName, title, anchor, descr, score, _filename] = item;
let listItem = document.createElement("li");
let requestUrl;
let linkUrl;
if (docBuilder === "dirhtml") {
// dirhtml builder
let dirname = docName + "/";
if (dirname.match(/\/index\/$/))
dirname = dirname.substring(0, dirname.length - 6);
else if (dirname === "index/") dirname = "";
requestUrl = docUrlRoot + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = docUrlRoot + docName + docFileSuffix;
linkUrl = docName + docLinkSuffix;
}
let linkEl = listItem.appendChild(document.createElement("a"));
linkEl.href = linkUrl + anchor;
linkEl.dataset.score = score;
linkEl.innerHTML = title;
if (descr)
listItem.appendChild(document.createElement("span")).innerHTML =
" (" + descr + ")";
else if (showSearchSummary)
fetch(requestUrl)
.then((responseData) => responseData.text())
.then((data) => {
if (data)
listItem.appendChild(
Search.makeSearchSummary(data, searchTerms)
);
});
Search.output.appendChild(listItem);
};
const _finishSearch = (resultCount) => {
Search.stopPulse();
Search.title.innerText = _("Search Results");
if (!resultCount)
Search.status.innerText = Documentation.gettext(
"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
);
else
Search.status.innerText = _(
`Search finished, found ${resultCount} page(s) matching the search query.`
);
};
const _displayNextItem = (
results,
resultCount,
searchTerms
) => {
// results left, load the summary and display it
// this is intended to be dynamic (don't sub resultsCount)
if (results.length) {
_displayItem(results.pop(), searchTerms);
setTimeout(
() => _displayNextItem(results, resultCount, searchTerms),
5
);
}
// search finished, update title and status message
else _finishSearch(resultCount);
};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
* custom function per language.
*
* The regular expression works by splitting the string on consecutive characters
* that are not Unicode letters, numbers, underscores, or emoji characters.
* This is the same as ``\W+`` in Python, preserving the surrogate pair area.
*/
if (typeof splitQuery === "undefined") {
var splitQuery = (query) => query
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
.filter(term => term) // remove remaining empty strings
}
/**
* Search Module
*/
const Search = {
_index: null,
_queued_query: null,
_pulse_status: -1,
htmlToText: (htmlString) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
const docContent = htmlElement.querySelector('[role="main"]');
    if (docContent) return docContent.textContent;
    console.warn(
      "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Please check your theme or template."
    );
return "";
},
init: () => {
const query = new URLSearchParams(window.location.search).get("q");
document
.querySelectorAll('input[name="q"]')
.forEach((el) => (el.value = query));
if (query) Search.performSearch(query);
},
loadIndex: (url) =>
(document.body.appendChild(document.createElement("script")).src = url),
setIndex: (index) => {
Search._index = index;
if (Search._queued_query !== null) {
const query = Search._queued_query;
Search._queued_query = null;
Search.query(query);
}
},
hasIndex: () => Search._index !== null,
deferQuery: (query) => (Search._queued_query = query),
stopPulse: () => (Search._pulse_status = -1),
startPulse: () => {
if (Search._pulse_status >= 0) return;
const pulse = () => {
Search._pulse_status = (Search._pulse_status + 1) % 4;
Search.dots.innerText = ".".repeat(Search._pulse_status);
if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch: (query) => {
// create the required interface elements
const searchText = document.createElement("h2");
searchText.textContent = _("Searching");
const searchSummary = document.createElement("p");
searchSummary.classList.add("search-summary");
searchSummary.innerText = "";
const searchList = document.createElement("ul");
searchList.classList.add("search");
const out = document.getElementById("search-results");
Search.title = out.appendChild(searchText);
Search.dots = Search.title.appendChild(document.createElement("span"));
Search.status = out.appendChild(searchSummary);
Search.output = out.appendChild(searchList);
const searchProgress = document.getElementById("search-progress");
// Some themes don't use the search progress node
if (searchProgress) {
searchProgress.innerText = _("Preparing search...");
}
Search.startPulse();
// index already loaded, the browser was quick!
if (Search.hasIndex()) Search.query(query);
else Search.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query: (query) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const allTitles = Search._index.alltitles;
const indexEntries = Search._index.indexentries;
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
const excludedTerms = new Set();
const highlightTerms = new Set();
const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
splitQuery(query.trim()).forEach((queryTerm) => {
const queryTermLower = queryTerm.toLowerCase();
// maybe skip this "word"
// stopwords array is from language_data.js
if (
stopwords.indexOf(queryTermLower) !== -1 ||
queryTerm.match(/^\d+$/)
)
return;
// stem the word
let word = stemmer.stemWord(queryTermLower);
// select the correct list
if (word[0] === "-") excludedTerms.add(word.substr(1));
else {
searchTerms.add(word);
highlightTerms.add(queryTermLower);
}
});
if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
}
// console.debug("SEARCH: searching for:");
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
// array of [docname, title, anchor, descr, score, filename]
let results = [];
_removeChildren(document.getElementById("search-progress"));
const queryLower = query.toLowerCase();
for (const [title, foundTitles] of Object.entries(allTitles)) {
if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
results.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
for (const [file, id] of foundEntries) {
let score = Math.round(100 * queryLower.length / entry.length)
results.push([
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// lookup as object
objectTerms.forEach((term) =>
results.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort((a, b) => {
const leftScore = a[4];
const rightScore = b[4];
if (leftScore === rightScore) {
// same score: sort alphabetically
const leftTitle = a[1].toLowerCase();
const rightTitle = b[1].toLowerCase();
if (leftTitle === rightTitle) return 0;
return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
}
return leftScore > rightScore ? 1 : -1;
});
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
let seen = new Set();
results = results.reverse().reduce((acc, result) => {
let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
if (!seen.has(resultStr)) {
acc.push(result);
seen.add(resultStr);
}
return acc;
}, []);
results = results.reverse();
// for debugging
//Search.lastresults = results.slice(); // a copy
// console.info("search results:", Search.lastresults);
// print the results
_displayNextItem(results, results.length, searchTerms);
},
/**
* search for object names
*/
performObjectSearch: (object, objectTerms) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const objects = Search._index.objects;
const objNames = Search._index.objnames;
const titles = Search._index.titles;
const results = [];
const objectSearchCallback = (prefix, match) => {
const name = match[4]
const fullname = (prefix ? prefix + "." : "") + name;
const fullnameLower = fullname.toLowerCase();
if (fullnameLower.indexOf(object) < 0) return;
let score = 0;
const parts = fullnameLower.split(".");
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower === object || parts.slice(-1)[0] === object)
score += Scorer.objNameMatch;
else if (parts.slice(-1)[0].indexOf(object) > -1)
score += Scorer.objPartialMatch; // matches in last name
const objName = objNames[match[1]][2];
const title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
const otherTerms = new Set(objectTerms);
otherTerms.delete(object);
if (otherTerms.size > 0) {
const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
if (
[...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
)
return;
}
let anchor = match[3];
if (anchor === "") anchor = fullname;
else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
const descr = objName + _(", in ") + title;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2]))
score += Scorer.objPrio[match[2]];
else score += Scorer.objPrioDefault;
results.push([
docNames[match[0]],
fullname,
"#" + anchor,
descr,
score,
filenames[match[0]],
]);
};
Object.keys(objects).forEach((prefix) =>
objects[prefix].forEach((array) =>
objectSearchCallback(prefix, array)
)
);
return results;
},
/**
* search for full-text terms in the index
*/
performTermsSearch: (searchTerms, excludedTerms) => {
// prepare search
const terms = Search._index.terms;
const titleTerms = Search._index.titleterms;
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const scoreMap = new Map();
const fileMap = new Map();
// perform the search on the required terms
searchTerms.forEach((word) => {
const files = [];
const arr = [
{ files: terms[word], score: Scorer.term },
{ files: titleTerms[word], score: Scorer.title },
];
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
Object.keys(terms).forEach((term) => {
if (term.match(escapedWord) && !terms[word])
arr.push({ files: terms[term], score: Scorer.partialTerm });
});
Object.keys(titleTerms).forEach((term) => {
if (term.match(escapedWord) && !titleTerms[word])
            arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
});
}
// no match but word was a required one
if (arr.every((record) => record.files === undefined)) return;
// found search word in contents
arr.forEach((record) => {
if (record.files === undefined) return;
let recordFiles = record.files;
if (recordFiles.length === undefined) recordFiles = [recordFiles];
files.push(...recordFiles);
// set score for the word in each file
recordFiles.forEach((file) => {
if (!scoreMap.has(file)) scoreMap.set(file, {});
scoreMap.get(file)[word] = record.score;
});
});
// create the mapping
files.forEach((file) => {
if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
fileMap.get(file).push(word);
else fileMap.set(file, [word]);
});
});
// now check if the files don't contain excluded terms
const results = [];
for (const [file, wordList] of fileMap) {
// check if all requirements are matched
// as search terms with length < 3 are discarded
const filteredTermCount = [...searchTerms].filter(
(term) => term.length > 2
).length;
if (
wordList.length !== searchTerms.size &&
wordList.length !== filteredTermCount
)
continue;
// ensure that none of the excluded terms is in the search result
if (
[...excludedTerms].some(
(term) =>
terms[term] === file ||
titleTerms[term] === file ||
(terms[term] || []).includes(file) ||
(titleTerms[term] || []).includes(file)
)
)
break;
// select one (max) score for the file.
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
// add result to the result list
results.push([
docNames[file],
titles[file],
"",
null,
score,
filenames[file],
]);
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words.
*/
makeSearchSummary: (htmlText, keywords) => {
const text = Search.htmlToText(htmlText);
if (text === "") return null;
const textLower = text.toLowerCase();
const actualStartPosition = [...keywords]
.map((k) => textLower.indexOf(k.toLowerCase()))
.filter((i) => i > -1)
.slice(-1)[0];
const startWithContext = Math.max(actualStartPosition - 120, 0);
const top = startWithContext === 0 ? "" : "...";
const tail = startWithContext + 240 < text.length ? "..." : "";
let summary = document.createElement("p");
summary.classList.add("context");
summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
return summary;
},
};
_ready(Search.init);
| python/docs/_static/searchtools.js/0 | {
"file_path": "python/docs/_static/searchtools.js",
"repo_id": "python",
"token_count": 6918
} | 54 |
<jupyter_start><jupyter_text>Basic example of using the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs.[](https://colab.research.google.com/github/NVIDIA/cutlass/tree/master/examples/00_basic_gemm.ipynb) We first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np
import random
import cutlass
# This controls whether the C++ GEMM declaration will be printed at each step. Set to `False` to
# omit this information.
print_module = True
m = 128
n = m
k = m
dtype = np.float16
type_A = np.float16
type_B = np.float16
type_C = np.float16
type_D = np.float16
np.random.seed(1234)
random.seed(1234)
scope_min = -4
scope_max = 4
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
alpha = np.float16(1.)
beta = np.float16(0.)
tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output>/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Declaring and running a GEMMTo get started, one only needs to provide the tensors declared above to the `cutlass.op.Gemm` call.This sets up a default GEMM operation for the given device on which you are running.Assuming that we are running on SM80, this defaults to using a GEMM that leverages FP16 Tensor Core operations.Calling `plan.run()` will generate the CUTLASS C++ kernel in question, compile it, and run it on the tensors we previously passed in. By setting `print_module` to `True`, the C++ code that is emitted is printed.<jupyter_code># We specify `element_accumulator` here so as to match the kernel run by NumPy below. However,
# specifying `element_accumulator` is not required if it is the same as `element`
plan = cutlass.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor, element_accumulator=np.float32)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_type :
  public cutlass_sm80_tensorop_f16_s1[...]<jupyter_text>There are many other ways to construct a plan from `cutlass.op.Gemm` (e.g., by specifying the types and layouts of each operand, by providing representative tensors as inputs). For more details on these, see the documentation in the `cutlass.op.Gemm` constructor. We then compare the output to running the GEMM using NumPy.<jupyter_code>tensor_D_numpy = (alpha * (tensor_A @ tensor_B)) + (beta * tensor_C)
np.testing.assert_array_equal(tensor_D, tensor_D_numpy)<jupyter_output><empty_output><jupyter_text>Note that one could use the same kernel just declared for tensors provided by other frameworks beyond NumPy, such as PyTorch or CuPy. Changing operation modesBy default, the CUTLASS Python interface will try to use Tensor Core operations whenever possible. If the configuration provided to `cutlass.op.Gemm` is not supported on Tensor Cores, the interface will fall back to using a SIMT kernel.The operation mode currently in use can be returned via the `plan.opclass` property. In this case, Tensor Core operations are being used.<jupyter_code>print(plan.opclass)<jupyter_output>OpcodeClass.TensorOp<jupyter_text>Suppose that we don't want to use Tensor Cores for this GEMM. One can change to using CUTLASS's SIMT GEMMs by setting the plan's `opclass` field.As is shown in the printed output, the emitted kernel uses template parameters that fit CUTLASS's SIMT GEMMs.Also notice that, this time around, we provided tensor parameters to `plan.run()`. One is free to provide different parameters to `plan.run()` than were passed in at the initial call to `cutlass.op.Gemm`, provided that the passed-in tensors have the same data type and layout as those passed in on initialization.<jupyter_code>tensor_D_simt = np.zeros(tensor_C.shape).astype(type_D)
plan.opclass = cutlass.OpcodeClass.Simt
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_simt, alpha, beta, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1
using cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 1,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 1,
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 8>,
cutlass::gemm::GemmShape<32, 64, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 1, float, float>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1_type :
public cutlass_sm80_simt_f16_sgemm_f16_1x1x1_128x128_8x2_tt_align1_base { };<jupyter_text>If we compare the output of the Tensor Core and SIMT GEMMs we just ran we see that they are equal.<jupyter_code>np.testing.assert_array_equal(tensor_D, tensor_D_simt)<jupyter_output><empty_output><jupyter_text>Running cached kernelsYou may have noticed that the `plan.run()` calls for the previous two kernels took some time to execute. This is because the kernel being emitted had not yet been compiled.CUTLASS caches compiled binaries so that recompilation isn't necessary every time a kernel is run. For example, if we change modes back to using Tensor Cores and call `plan.run()` again (with a different set of tensor parameters), you'll find the call to return much faster.<jupyter_code>m = 2400
n = 3232
k = 4096
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
tensor_D = np.zeros(tensor_C.shape).astype(type_D)
alpha = np.float16(1.)
beta = np.float16(2.)
plan.opclass = cutlass.OpcodeClass.TensorOp
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_256x128_64x3_tt_align8_type :
public cutlass_sm80_tensorop_f16_s1[...]<jupyter_text>Running non-default GEMMsThe previous examples showed how it is simple to get started running a default GEMM kernel in CUTLASS. But, what do you do if you want a bit more control over the parameters to the GEMM?Under the hood, CUTLASS enumerates the different GEMM configuration parameters possible for this kernel from the CUTLASS profiler. The code below shows how one can access the tile descriptions for the kernels (e.g., cluster, threadblock, and warp shape).<jupyter_code>tiles = plan.tile_descriptions()
print('{} tile descriptions returned'.format(len(tiles)))
num_print = 10
print('First {} tile descriptions are:'.format(num_print))
for td in tiles[:num_print]:
print(td)<jupyter_output>132 tile descriptions returned
First 10 tile descriptions are:
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [256, 128, 64]
WarpCount: [4, 2, 1]
Stages: 3
Kernel schedule: ScheduleAuto
}
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [128, 256, 64]
WarpCount: [2, 4, 1]
Stages: 3
Kernel schedule: ScheduleAuto
}
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [256, 128, 64]
WarpCount: [4, 2, 1]
Stages: 3
Kernel schedule: ScheduleAuto
}
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [128, 256, 64]
WarpCount: [2, 4, 1]
Stages: 3
Kernel schedule: ScheduleAuto
}
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [256, 128, 32]
WarpCount: [4, 2, 1]
Stages: 3
Kernel schedule: ScheduleAuto
}
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [128, 256, 32]
WarpCount: [2, 4, 1]
Stages: 3
Kernel schedule: ScheduleAuto
}
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [256, 64, 64]
WarpCount: [4, 1, 1]
Stages: 4
Kernel schedule: ScheduleAuto
}
{
Cl[...]<jupyter_text>Next, we'll pick one of these configurations at random and compile and run it.<jupyter_code>idx = random.randint(0, len(tiles)-1)
td = tiles[idx]
print('Tile description {} is: {}'.format(idx, td))
plan.compile(td)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output>Tile description 112 is:
{
ClusterShape: [1, 1, 1]
ThreadblockShape: [128, 128, 32]
WarpCount: [2, 2, 1]
Stages: 4
Kernel schedule: ScheduleAuto
}<jupyter_text>One can also change the swizzling function used by the kernel. For example, one can modify the kernel to use the stream K feature of CUTLASS via:<jupyter_code># Stream K is only supported pre-SM90 (at least when this example was written)
if plan.cc != 90:
plan.swizzling_functor = cutlass.swizzle.ThreadblockSwizzleStreamK
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_128x128_32x4_tt_align8
using cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_128x128_32x4_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
cutlass::gemm::threadblock::ThreadblockSwizzleStreamK,
4,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_f16_s16x8x16gemm_f16_1x1x1_128x128_32x4_tt_align8_type :
public cutlass_sm80_tensorop_f16_s16x8x16ge[...]<jupyter_text>Handling errorsThe CUTLASS Python interface attempts to catch runtime and compilation errors in Python so as to provide more understandable error messages.Here's an example in which we try to use too many stages for a given GEMM kernel. Normally, this would result in a runtime error due to the GPU having insufficient shared memory to launch the kernel with 8 stages. The CUTLASS Python interface is able to detect this issue before compiling the kernel, and reports it back to the user.<jupyter_code># td = tiles[0]
# td.stages = 8
# plan.compile(td)<jupyter_output><empty_output> | python/docs/externals/00_basic_gemm.ipynb/0 | {
"file_path": "python/docs/externals/00_basic_gemm.ipynb",
"repo_id": "python",
"token_count": 4739
} | 55 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit test for store nodes in SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTLayout(EVTTestCaseBase):
def test_permute_1(self):
"""
        Test composing permutes that cancel: D is computed in the permuted (l, n, m) space and permuted back to (l, m, n)
"""
def evt_permute(accum, alpha, C):
F = alpha * accum
F_permute = permute(F, indices=(0, 2, 1))
D_permute = F_permute + permute(C, indices=(0, 2, 1))
D = permute(D_permute, indices=(0, 2, 1))
return D, F
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 0.5,
"C": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_permute, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() != 90, "This unittest is for cc = Sm90 only")
def test_permute_2(self):
"""
        Test storing D with the last two modes of F permuted, so that D has shape (l, n, m)
"""
def evt_permute(accum, alpha, C):
F = alpha * accum
F_permute = permute(F, indices=(0, 2, 1))
D = F_permute + C
return D, F
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 0.5,
"C": self.fake_tensor(self.element, (l, n, m)),
"F": self.fake_tensor(self.element, (l, m, n)),
"D": self.fake_tensor(self.element, (l, n, m)),
}
launcher = EVTTestBed(self.element, evt_permute, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() != 90, "This unittest is for cc = Sm90 only")
def test_permute_3(self):
"""
        Test permuting the batch and row modes of F, so that D has shape (m, l, n)
"""
def evt_permute(accum, alpha, C):
F = alpha * accum
F_permute = permute(F, indices=(1, 0, 2))
D = F_permute + C
return D, F
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 0.5,
"C": self.fake_tensor(self.element, (m, l, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"D": self.fake_tensor(self.element, (m, l, n)),
}
launcher = EVTTestBed(self.element, evt_permute, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_reshape(self):
"""
Test reshape
"""
def evt_reshape(accum, alpha, TensorE):
F = alpha * accum
E_reshape = reshape(TensorE, new_shape=(512, 1))
D = F + E_reshape
return D
example_inputs = {
"accum": self.fake_tensor(self.element, (self.l, self.m, self.n)),
"alpha": 0.5,
"TensorE": self.fake_tensor(self.element, (16, 32)),
"D": self.fake_tensor(self.element, (self.l, self.m, self.n)),
}
launcher = EVTTestBed(self.element, evt_reshape, example_inputs)
input_keys = ["alpha", "TensorE"]
result_keys = ["D"]
launcher.verify(self.problem_size, input_keys, result_keys, self.l)
def test_reshape2(self):
"""
Test reshaping the intermediate result before a broadcast add with an auxiliary input
"""
def evt_reshape(accum, alpha, TensorE):
F = alpha * accum
F_reshape = reshape(F, new_shape=(2, 3, 512, 256))
D = F_reshape + TensorE
return D
example_inputs = {
"accum": self.fake_tensor(self.element, (self.l, self.m, self.n)),
"alpha": 0.5,
"TensorE": self.fake_tensor(self.element, (2, 3, 1, self.n)),
"D": self.fake_tensor(self.element, (2, 3, self.m, self.n)),
}
launcher = EVTTestBed(self.element, evt_reshape, example_inputs)
input_keys = ["alpha", "TensorE"]
result_keys = ["D"]
launcher.verify(self.problem_size, input_keys, result_keys, self.l)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_layout_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_layout_sm80_90.py",
"repo_id": "test",
"token_count": 3030
} | 56 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from math import prod
import os
import re
import subprocess
import torch
from cutlass_library import (
DataType,
DataTypeSize,
GemmUniversalMode,
LayoutType,
OpcodeClass,
ShortDataTypeNames,
SwizzlingFunctor
)
from cutlass.backend import compiler
from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal
from cutlass.backend.reduction_operation import ReductionArguments, ReductionOperation
from cutlass.shape import GemmCoord, MatrixCoord
from cutlass.utils.datatypes import torch_type
class GemmUniversalLauncher:
def __init__(
self,
operation,
seed=2080,
verification=True,
iterations=500,
compiler_mode="nvcc",
**kwargs,
) -> None:
self.math_operation = operation.tile_description.math_instruction.math_operation
self.verification = verification
if compiler_mode == "nvcc":
compiler.nvcc()
elif compiler_mode == "nvrtc":
compiler.nvrtc()
else:
raise Exception(f"Unexpected compiler string {compiler_mode}")
op_list = [operation]
if operation.arch < 90:
# Split K via Python is currently only supported for pre-SM90 kernels
self.reduction_operation: ReductionOperation = ReductionOperation(
shape=MatrixCoord(4, 32 * operation.C.alignment),
C=operation.C,
element_accumulator=operation.tile_description.math_instruction.element_accumulator,
element_compute=operation.epilogue_functor.element_epilogue,
epilogue_functor=operation.epilogue_functor,
count=operation.C.alignment,
)
op_list.append(self.reduction_operation)
compiler.add_module(op_list, bypass_cache=False)
self.operation = operation
self.dtype_A = torch_type(operation.A.element if not self.operation.switched else self.operation.B.element)
self.dtype_B = torch_type(operation.B.element if not self.operation.switched else self.operation.A.element)
self.dtype_C = torch_type(operation.C.element)
self.dtype_D = torch_type(operation.epilogue_functor.element_output)
element_size = min(DataTypeSize[operation.A.element], DataTypeSize[operation.B.element])
if element_size == 1:
self.rand_max = 1
self.rand_min = 0
elif element_size <= 8:
self.rand_max = 1
self.rand_min = -1
elif element_size == 16:
self.rand_max = 4
self.rand_min = -4
else:
self.rand_max = 8
self.rand_min = -8
self.seed = seed
self.compute_type = operation.epilogue_functor.element_epilogue
self.accumulator_type = operation.tile_description.math_instruction.element_accumulator
def print_problem_size(self, p, mode, batch_count):
if mode == GemmUniversalMode.Gemm:
mode = "Gemm"
elif mode == GemmUniversalMode.Batched:
mode = "GemmBatched"
elif mode == GemmUniversalMode.GemmSplitKParallel:
mode = "GemmSplitKParallel"
print(f"problem: {p.m}, {p.n}, {p.k}\n batch_count: {batch_count}\n mode: {mode}")
def uniform_init(self, shape, dtype, layout):
size = prod(shape)
if dtype.is_floating_point:
# Initialize data in FP32 and then convert to the desired data type.
# This is a workaround for the following error that occurs when attempting to
# call uniform_ on a tensor with torch.float8_e4m3fn data:
# RuntimeError: "check_uniform_bounds" not implemented for 'Float8_e4m3fn'
data = torch.ceil(
torch.empty(size=(size,), dtype=torch.float32, device="cuda").uniform_(
self.rand_min - 0.5, self.rand_max - 0.5)
).to(dtype)
else:
# PyTorch does not currently support integer-typed matrix multiplications on GPU.
# Fall back to CPU for integer type references.
data = torch.empty(size=(size,), dtype=dtype, device="cpu").random_(self.rand_min, self.rand_max + 1)
is_fp8 = dtype == getattr(torch, "float8_e4m3fn", -1) or dtype == getattr(torch, "float8_e5m2", -1)
if dtype == torch.float64 or dtype == torch.float32 or is_fp8:
data = data.to("cpu")
data_ref = data.reshape(shape)
if layout == LayoutType.RowMajor:
data_cutlass = data_ref
else:
data_cutlass = data_ref.transpose(-1, -2).contiguous()
data_cutlass = data_cutlass.to("cuda")
# As of this writing, few operations in PyTorch are supported with FP8 data.
# Thus, we perform computation in FP32 for FP8 reference checks.
if is_fp8:
data_ref = data_ref.to(torch.float32)
return data_cutlass, data_ref
def reference(self, problem_size, tensor_A, tensor_B, tensor_C, alpha, beta):
# Compute the reference result D = alpha * (A @ B) + beta * C.
# If any tensor is on the CPU, place all tensors on the CPU, unless only tensor C is on the CPU.
# Handle mixed-input cases by casting the narrower operand to the data type of the wider operand
# and computing the reference in that type.
if self.dtype_A != self.dtype_B:
if DataTypeSize[self.operation.A.element] < DataTypeSize[self.operation.B.element]:
tensor_A = tensor_A.to(self.dtype_B).to(tensor_B.device)
else:
tensor_B = tensor_B.to(self.dtype_A).to(tensor_A.device)
devices = [x.device.type for x in [tensor_A, tensor_B]]
if tensor_C is not None:
devices.append(tensor_C.device.type)
if "cpu" in devices and devices != ["cuda", "cuda", "cpu"]:
device = torch.device("cpu")
else:
device = tensor_A.device
tensor_A = tensor_A.to(device)
tensor_B = tensor_B.to(device)
if tensor_C is not None:
tensor_C = tensor_C.to(device)
dtype = torch_type(self.compute_type)
alpha_torch = torch.tensor([alpha], device=device).to(dtype)
beta_torch = torch.tensor([beta], device=device).to(dtype)
tmp = tensor_A @ tensor_B
tensor_D_ref = (alpha_torch * tmp)
if tensor_C is not None:
tensor_D_ref += (tensor_C * beta_torch)
return tensor_D_ref.to(self.dtype_D)
def run(self, mode, problem_size, batch_count=1, split_k_slices=1, alpha=1.0, beta=0.0):
torch.random.manual_seed(self.seed)
# Assign an actual batch count in cases where we are not running in batched mode.
# This is to differentiate between the number of split K slices and the batch count,
# which are overloaded within the single `batch_count` variable.
if mode == GemmUniversalMode.Batched:
true_batch_count = batch_count
else:
true_batch_count = 1
def transpose(layout):
if layout == LayoutType.RowMajor:
return LayoutType.ColumnMajor
else:
return LayoutType.RowMajor
tensor_A, tensor_A_ref = self.uniform_init(
(true_batch_count, problem_size.m, problem_size.k),
self.dtype_A,
self.operation.A.layout if not self.operation.switched else transpose(self.operation.B.layout),
)
tensor_B, tensor_B_ref = self.uniform_init(
(true_batch_count, problem_size.k, problem_size.n),
self.dtype_B,
self.operation.B.layout if not self.operation.switched else transpose(self.operation.A.layout),
)
if self.dtype_C is not None:
tensor_C, tensor_C_ref = self.uniform_init(
(true_batch_count, problem_size.m, problem_size.n),
self.dtype_C,
self.operation.C.layout if not self.operation.switched else transpose(self.operation.C.layout),
)
else:
tensor_C = None
tensor_C_ref = None
tensor_D, _ = self.uniform_init(
(true_batch_count, problem_size.m, problem_size.n),
self.dtype_D,
self.operation.C.layout if not self.operation.switched else transpose(self.operation.C.layout),
)
tensor_D = torch.zeros_like(tensor_D)
if self.compute_type in [DataType.s8, DataType.s32, DataType.u8, DataType.u32]:
alpha = int(alpha)
beta = int(beta)
#
# Launch kernel
#
arguments = GemmArguments(
operation=self.operation,
problem_size=problem_size,
A=tensor_A,
B=tensor_B,
C=tensor_C,
D=tensor_D,
output_op=self.operation.epilogue_type(alpha, beta),
gemm_mode=mode,
split_k_slices=split_k_slices,
batch=batch_count,
)
if mode == GemmUniversalMode.GemmSplitKParallel:
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[problem_size.m, problem_size.n],
partitions=split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op=self.reduction_operation.epilogue_type(alpha, beta),
)
self.operation.run(arguments)
if mode == GemmUniversalMode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
passed = True
if self.verification:
if mode == GemmUniversalMode.GemmSplitKParallel:
reduction_arguments.sync()
# Free memory allocated by args because we are not
# calling `arguments.sync()` in this case (which will free memory)
arguments.free()
else:
arguments.sync()
tensor_D_ref = self.reference(
problem_size,
tensor_A_ref,
tensor_B_ref,
tensor_C_ref,
alpha,
beta,
)
tensor_D_ref = tensor_D_ref.to('cuda')
if self.operation.switched or self.operation.C.layout == LayoutType.ColumnMajor:
tensor_D = tensor_D.transpose(-1, -2).contiguous()
passed = tensor_D.equal(tensor_D_ref)
try:
assert passed
except AssertionError:
self.print_problem_size(problem_size, mode, batch_count)
del arguments
if mode == GemmUniversalMode.GemmSplitKParallel:
del reduction_arguments
return passed
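# Illustrative usage sketch (not executed by this module; assumes `operation` is an
# already-constructed GemmOperationUniversal describing the kernel under test):
#
#   launcher = GemmUniversalLauncher(operation, compiler_mode="nvcc")
#   passed = launcher.run(GemmUniversalMode.Gemm, GemmCoord(128, 128, 64))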
def test_all_gemm(operation: "GemmOperationUniversal", testcase="universal", compilation_mode="nvcc"):
passed = True
minimum_operand_element_size = min(
DataTypeSize[operation.A.element], DataTypeSize[operation.B.element]
)
opcode_class = operation.tile_description.math_instruction.opcode_class
if opcode_class == OpcodeClass.Simt:
alignment = 1
else:
alignment = 128 // minimum_operand_element_size
alignment_m = alignment
alignment_n = alignment
alignment_k = alignment
# INT8 alignment constraints
if opcode_class == OpcodeClass.Simt:
A_is_s8 = operation.A.element == DataType.s8
B_is_s8 = operation.B.element == DataType.s8
if A_is_s8 and operation.A.layout == LayoutType.ColumnMajor:
alignment_m = 4
if B_is_s8 and operation.A.layout == LayoutType.RowMajor:
alignment_n = 4
if A_is_s8 and B_is_s8 and (operation.A.layout == LayoutType.RowMajor or operation.B.layout == LayoutType.ColumnMajor):
alignment_k = 4
threadblock_k = operation.tile_description.threadblock_shape[2]
assert testcase != "interleaved"
supports_split_k = operation.arch < 90 and not operation.swizzling_functor == SwizzlingFunctor.StreamK
if testcase == "multistage":
modes = [GemmUniversalMode.Gemm]
problem_size_m = [16, 528]
problem_size_n = [16, 528]
problem_size_k = [
threadblock_k,
threadblock_k * operation.tile_description.stages
+ operation.tile_description.math_instruction.instruction_shape[2],
]
problem_alpha = [1.0]
problem_beta = [0.0]
batch_counts = [1]
else:
modes = [GemmUniversalMode.Gemm]
batch_counts = [1, 2, 3, 5, 7]
if supports_split_k:
modes.append(GemmUniversalMode.GemmSplitKParallel)
problem_size_m = [alignment_m, 512 - 3 * alignment_m]
problem_size_n = [alignment_n, 512 - 2 * alignment_n]
if operation.tile_description.stages is None:
stages_for_k_calc = 7
else:
stages_for_k_calc = operation.tile_description.stages
problem_size_k = [
alignment_k,
threadblock_k * stages_for_k_calc - alignment_k,
threadblock_k * stages_for_k_calc * 3 - alignment_k,
]
problem_alpha = [1.0]
problem_beta = [2.0]
testbed = GemmUniversalLauncher(operation, compiler_mode=compilation_mode)
for mode in modes:
for m in problem_size_m:
for n in problem_size_n:
for k in problem_size_k:
for batch_count in batch_counts:
for alpha in problem_alpha:
for beta in problem_beta:
# skip very small K problems
if testcase == "universal":
if k // batch_count < 2 * threadblock_k:
continue
problem_size = GemmCoord(m, n, k)
if supports_split_k:
split_k_slices = batch_count
else:
split_k_slices = 1
overridden_mode = mode
if mode == GemmUniversalMode.Gemm and batch_count > 1:
overridden_mode = GemmUniversalMode.Batched
passed = testbed.run(
overridden_mode,
problem_size,
batch_count,
split_k_slices,
alpha,
beta,
)
if not passed:
return False
return passed
| test/python/cutlass/gemm/gemm_testbed.py/0 | {
"file_path": "test/python/cutlass/gemm/gemm_testbed.py",
"repo_id": "test",
"token_count": 7721
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
template <typename Conv2d, int InterleavedK>
class InterleavedTestbedConv2d {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
/// Reduction kernel
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
using ReductionStrideIndex = typename ReductionDevice::StrideIndex;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementB, LayoutB> tensor_B_reordered;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
public:
InterleavedTestbedConv2d(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
scope = 3;
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_B_reordered.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
cutlass::reorder_convK<InterleavedK>(
tensor_B_reordered.host_ref(), tensor_B.host_ref(), implicit_gemm_problem_size(kConvolutionalOperator, problem_size));
tensor_A.sync_device();
tensor_B.sync_device();
tensor_B_reordered.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_D_reference.sync_device();
}
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerMultiprocessor < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0 //display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << float(alpha) << ", " << float(beta) << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B_reordered.device_ref(),
tensor_C.device_ref(),
tensor_D_computed.device_ref(),
{alpha, beta},
split_k_mode
);
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
// conv2d operation with parallel split-k-mode
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// conv2d output is written to workspace in global memory
conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get()));
// accumulate mma for each cta in k-dimension (1.0 * A * B)
conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)};
// update conv2d operator arguments
status = conv2d_op.update(conv2d_args, workspace.get());
}
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// configure parallel reduction operator
ReductionDevice reduction_op;
typename ReductionDevice::Arguments reduction_args(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(),
problem_size.split_k_slices,
cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size),
{
reinterpret_cast<ElementAccumulator*> (workspace.get()),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
{
tensor_D_computed.device_data(),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
{
tensor_C.device_data(),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
// apply alpha, beta to obtain the following equation alpha * ReduceAdd(A * B) + beta * C
{alpha, beta}
);
status = reduction_op.initialize(reduction_args, nullptr);
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run parallel reduction kernel
status = reduction_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
}
bool passed = false;
tensor_D_computed.sync_host();
//
// Reference check - support caching results
//
CachedTestKey cached_test_key = CreateCachedConv2dTestKey<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute
>(
kConvolutionalOperator,
problem_size,
alpha,
beta,
tensor_A.host_view(),
tensor_B.host_view(),
tensor_C.host_view()
);
//
// Look for the cached key
//
bool cached_result_loaded = false;
CachedTestResult cached_test_result;
std::string conv2d_result_cache_name =
std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
CachedTestResultListing cached_results(conv2d_result_cache_name);
auto cached = cached_results.find(cached_test_key);
cached_result_loaded = cached.first;
if (cached_result_loaded) {
cached_test_result = cached.second;
}
}
if (!cached_result_loaded) {
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
cutlass::NumericConverterClamp<ElementC, ElementCompute>
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_reference.device_ref(),
alpha,
beta);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementC,
cutlass::NumericConverterClamp<ElementC, ElementCompute>
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tensor_D_reference.host_ref(),
alpha,
beta);
#endif
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
cached_test_result.D = TensorHash(tensor_D_reference.host_view());
CachedTestResultListing cached_results(conv2d_result_cache_name);
cached_results.append(cached_test_key, cached_test_result);
cached_results.write(conv2d_result_cache_name);
}
} // if (!cached_result_loaded)
uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view());
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
passed = (tensor_D_hash == cached_test_result.D);
EXPECT_EQ(tensor_D_hash, cached_test_result.D)
<< "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n";
}
else {
passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view());
}
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_"))
<< "ncxhwx_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_cxrskx_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_")
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n";
results << "\nD reference (hash: " << cached_test_result.D << ")\n";
if (!cached_result_loaded) {
results
<< tensor_D_reference.host_view() << "\n";
}
results
<< "\nD computed (hash: " << tensor_D_hash << ")\n"
<< tensor_D_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllInterleavedConv2d: runs the cutlass::conv::device::ImplicitGemmConvolution operator on
// interleaved layouts and compares it with the reference implementation.
// It sweeps the default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes.
// Additionally, each conv2d test can provide its own problem sizes (conv_test_sizes) and a
// blacklist of sizes to skip (conv_blacklist_sizes).
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm, int InterleavedK>
bool TestAllInterleavedConv2d(
const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(),
const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) {
bool passed = true;
//
// Testbed object
//
InterleavedTestbedConv2d<ImplicitGemm, InterleavedK> testbed;
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(InterleavedK); // minimum channel size must be a multiple of InterleavedK for the interleaved layout
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
ChannelDivisibilitySpecification channel_spec(InterleavedK); // input and output channels must be a multiple of InterleavedK
auto pruned_problem_vector = prune(*problem_vector, channel_spec);
// Run conv testbed on default convolution sizes
for(auto conv_problem : pruned_problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Procedurally disable certain cases
//
// CUTLASS DGRAD's unity stride specialization only supports stride {1, 1}
if ((ImplicitGemm::kConvolutionalOperator ==
cutlass::conv::Operator::kDgrad) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kUnity)) {
if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
}
#if 0
// Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for
// a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
// which are absolutely necessary to catch functional bugs. The code below does provide the option to sweep
// alpha and beta for local testing, but only runs one value for alpha and beta.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)
{160, 3, 3, 288}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
);
cutlass::conv::SplitKMode split_k_modes [] = {
cutlass::conv::SplitKMode::kSerial,
cutlass::conv::SplitKMode::kParallel,
};
int split_k_slices[] = {
1, 2, 3, 4, 201
};
double problem_alpha[] = {
2.0
};
double problem_beta[] = {
2.0
};
for (auto split_k_mode : split_k_modes) {
for (auto split_k_slice : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));
if (!passed) {
return false;
}
}
}
}
}
#endif
return passed;
}
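// Illustrative usage sketch (hypothetical instantiation; `InterleavedConv2dFprop` stands in for a
// concrete cutlass::conv::device::ImplicitGemmConvolution type using interleaved layouts):
//
//   EXPECT_TRUE((TestAllInterleavedConv2d<InterleavedConv2dFprop, 32>()));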
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
| test/unit/conv/device/conv2d_testbed_interleaved.h/0 | {
"file_path": "test/unit/conv/device/conv2d_testbed_interleaved.h",
"repo_id": "test",
"token_count": 8832
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide Depthwise Direct Conv interface
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_depthwise_fprop.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "conv2d_testbed.h"
#include "depthwise_conv2d_direct_conv_testbed.h"
std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter3x3() {
std::vector<cutlass::conv::Conv2dProblemSize> problems;
for (int channels = 16; channels <= 512; channels += 16) {
problems.push_back(cutlass::conv::Conv2dProblemSize(
{1, 8, 8, channels}, // input size (NHWC)
{channels, 3, 3, 1}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation, // Convolution mode
16, // split_k_slices
channels // groups
));
// if(channels == 512 || channels == 16*14)
problems.push_back(cutlass::conv::Conv2dProblemSize(
{1, 16, 16, channels}, // input size (NHWC)
{channels, 3, 3, 1}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{2, 2}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation, // Convolution mode
16, // split_k_slices
channels // groups
));
}
return problems;
}
std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter5x5() {
std::vector<cutlass::conv::Conv2dProblemSize> problems;
for (int channels = 16; channels < 256; channels += 16) {
problems.push_back(cutlass::conv::Conv2dProblemSize(
{1, 16, 16, channels}, // input size (NHWC)
{channels, 5, 5, 1}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation, // Convolution mode
16, // split_k_slices
channels // groups
));
problems.push_back(cutlass::conv::Conv2dProblemSize(
{1, 112, 112, channels}, // input size (NHWC)
{channels, 5, 5, 1}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation, // Convolution mode
16, // split_k_slices
channels // groups
));
problems.push_back(cutlass::conv::Conv2dProblemSize(
{1, 112, 112, channels}, // input size (NHWC)
{channels, 5, 5, 1}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{2, 2}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation, // Convolution mode
16, // split_k_slices
channels // groups
));
}
return problems;
}
std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter5x37() {
std::vector<cutlass::conv::Conv2dProblemSize> problems;
for (int channels = 16; channels < 256; channels += 16) {
problems.push_back(cutlass::conv::Conv2dProblemSize(
{1, 128, 128, channels}, // input size (NHWC)
{channels, 5, 37, 1}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
cutlass::conv::Mode::kCrossCorrelation, // Convolution mode
108, // split_k_slices
channels // groups
));
}
return problems;
}
////////////////////////////////////////////////////////////////////////////////
TEST(
SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16,
64x32_4_8x32_3x3) {
using ElementInputA = cutlass::half_t;
using ElementInputB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementComputeEpilogue = cutlass::half_t;
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on the GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm60;
// This code section describes the groups a thread block will compute
constexpr int groups_per_cta = 32;
// This code section describes the output tile <N, P, Q, C> a thread block will compute
using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>;
// This code section describes the filter shape <R, S>
using FilterShape = cutlass::MatrixShape<3, 3>;
// Threadblock tile shape
using ThreadblockShape =
cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>;
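// With ThreadBlockOutputShape = <1, 8, 8, 32> and FilterShape = <3, 3>, this evaluates to
// GemmShape<64, 32, 9> (kNHW = 1 * 8 * 8 = 64), matching the 64x32 tile in the test name.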
// This code section describes the tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<8, groups_per_cta, FilterShape::kCount>;
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
ThreadBlockOutputShape::kN,
ThreadBlockOutputShape::kH,
ThreadBlockOutputShape::kW>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm =
cutlass::conv::IteratorAlgorithm::kOptimized;
constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
// This code section describes the epilogue part of the kernel; we use the default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
kEpilogueElementsPerAccess, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha/beta in linear combination
cutlass::epilogue::thread::ScaleType::Default>;
using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
cutlass::conv::StrideSupport::kStrided>::Kernel;
using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>;
/// Run all unit test sizes with device-level Conv2d instance
EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>(
DepthwiseFpropProblemSizes_filter3x3()));
}
////////////////////////////////////////////////////////////////////////////////
TEST(
SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16,
64x64_3_16x64_5x5) {
using ElementInputA = cutlass::half_t;
using ElementInputB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementComputeEpilogue = cutlass::half_t;
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on the GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm60;
// This code section describes the groups a thread block will compute
constexpr int groups_per_cta = 64;
// This code section describes the output tile <N, P, Q, C> a thread block will compute
using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>;
// This code section describes the filter shape <R, S>
using FilterShape = cutlass::MatrixShape<5, 5>;
// Threadblock tile shape
using ThreadblockShape =
cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>;
// This code section describes the tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>;
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
ThreadBlockOutputShape::kN,
ThreadBlockOutputShape::kH,
ThreadBlockOutputShape::kW>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm =
cutlass::conv::IteratorAlgorithm::kOptimized;
constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
// This code section describes the epilogue part of the kernel; we use the default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
kEpilogueElementsPerAccess, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha/beta in linear combination
cutlass::epilogue::thread::ScaleType::Default>;
using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
cutlass::conv::StrideSupport::kStrided>::Kernel;
using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>;
/// Run all unit test sizes with device-level Conv2d instance
EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>(
DepthwiseFpropProblemSizes_filter5x5()));
}
#if 0
////////////////////////////////////////////////////////////////////////////////
TEST(
SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16,
64x32_3_16x32_5x37) {
using ElementInputA = cutlass::half_t;
using ElementInputB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementComputeEpilogue = cutlass::half_t;
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on the GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm60;
// This code section describes the groups a thread block will compute
constexpr int groups_per_cta = 32;
// This code section describes the output tile <N, P, Q, C> a thread block will compute
using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>;
// This code section describes the filter shape <R, S>
using FilterShape = cutlass::MatrixShape<5, 37>;
// Threadblock tile shape
using ThreadblockShape =
cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>;
// This code section describes the tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>;
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
ThreadBlockOutputShape::kN,
ThreadBlockOutputShape::kH,
ThreadBlockOutputShape::kW>;
// Number of pipelines you want to use
constexpr int NumStages = 2;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm =
cutlass::conv::IteratorAlgorithm::kOptimized;
constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
// This code section describes the epilogue part of the kernel; we use the default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
kEpilogueElementsPerAccess, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha/beta in linear combination
cutlass::epilogue::thread::ScaleType::Default>;
using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
cutlass::conv::StrideSupport::kStrided>::Kernel;
using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>;
/// Run all unit test sizes with device-level Conv2d instance
EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>(
DepthwiseFpropProblemSizes_filter5x37()));
}
#endif
| test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu/0 | {
"file_path": "test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu",
"repo_id": "test",
"token_count": 7430
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "../common/cutlass_unit_test.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorView, rank2_contiguous_dynamic) {
int const M = 8;
int const N = 16;
typedef cutlass::TensorView<int, cutlass::layout::ContiguousMatrix> ContiguousTensorView;
cutlass::layout::Matrix layouts[] = {
cutlass::layout::Matrix::kColumnMajor,
cutlass::layout::Matrix::kRowMajor
};
cutlass::Coord<2> bounds = cutlass::make_Coord(M - 2, N - 2);
for (int i = 0; i < 2; ++i) {
int matrix_data[M * N] = { 0 };
int row_stride;
int col_stride;
if (layouts[i] == cutlass::layout::Matrix::kColumnMajor) {
row_stride = 1;
col_stride = M;
}
else {
row_stride = N;
col_stride = 1;
}
// Use helper to determine stride vector from leading dimension
ContiguousTensorView view(
matrix_data,
cutlass::layout::ContiguousMatrix::packed(cutlass::make_Coord(M, N), layouts[i]),
bounds);
ASSERT_TRUE(view.good());
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
cutlass::Coord<2> coord = cutlass::make_Coord(m, n);
if (view.contains(coord)) {
view.at(coord) = m * N + n;
}
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
int expected = 0;
if (m < bounds[0] && n < bounds[1]) {
expected = int(m * N + n);
}
EXPECT_EQ(matrix_data[m * row_stride + n * col_stride], expected);
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Uncomment the following line to observe output from printing TensorView objects
//
// #define OBSERVE_TENSORVIEW_IO // uncomment to enable printing
#ifdef OBSERVE_TENSORVIEW_IO
// This test constructs a TensorView of rank=2 with matrix layouts known at runtime. This
// uses TensorRefMapFunc classes defined in cutlass/matrix_traits.h to define the mapping
// from logical tensor indices to storage in memory.
//
// Helpers in tools/util/tensor_view_io.h print both the logical TensorView and the
// linear memory of the tensor.
TEST(TensorView, contiguous) {
int const M = 8;
int const N = 16;
typedef cutlass::TensorView<
int32_t,
cutlass::layout::ContiguousLayout> ContiguousTensorView;
cutlass::layout::Matrix layouts[] = {
cutlass::layout::Matrix::kColumnMajor,
cutlass::layout::Matrix::kRowMajor
};
cutlass::Coord<2> bounds = cutlass::make_Coord(M, N);
for (int i = 0; i < 2; ++i) {
int matrix_data[M * N] = { 0 };
int ldm;
int row_stride;
int col_stride;
if (layouts[i] == cutlass::layout::Matrix::kColumnMajor) {
row_stride = 1;
col_stride = M;
ldm = col_stride;
}
else {
row_stride = N;
col_stride = 1;
ldm = row_stride;
}
// Use helper to determine stride vector from leading dimension
ContiguousTensorView view(
matrix_data,
cutlass::layout::ContiguousLayout::stride(layouts[i], ldm),
bounds);
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
cutlass::Coord<2> coord = cutlass::make_Coord(m, n);
if (view.contains(coord)) {
view.at(coord) = m * N + n;
}
}
}
std::cout << "---------\n";
std::cout << (layouts[i] == cutlass::layout::Matrix::kColumnMajor ?
"Column-major:" : "Row-major:") << "\n\n";
std::cout << "Logical view:\n";
std::cout.width(4);
std::cout << view << "\n" << std::endl; // Print TensorView object.
std::cout << "Linear memory:";
for (int idx = 0; idx < view.capacity(); ++idx) {
if (!(idx % (layouts[i] == cutlass::layout::Matrix::kColumnMajor ? M : N))) {
std::cout << std::endl;
}
std::cout << std::setw(4) << view.at(idx) << " ";
}
std::cout << "\n" << std::endl;
}
}
// This test is similar to the previous except it uses a column-major, interleaved data
// layout. The test prints both the logical representation (a typical column-major matrix)
// and a representation of linear memory.
//
// Note: the interleave=4 structure implies that every four consecutive elements in the
// same row are adjacent in memory, followed by the same four columns of the next row.
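//
// For illustration (a sketch, not output produced by this test): with M = 2, N = 8, and
// interleave = 4, the linear memory order of logical (row, column) coordinates would be
// (0,0) (0,1) (0,2) (0,3) (1,0) (1,1) (1,2) (1,3) (0,4) (0,5) (0,6) (0,7) (1,4) ...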
TEST(TensorView, rank2_column_major_interleaved) {
int const M = 16;
int const N = 16;
int const kInterleave = 4;
int matrix_data[M * N] = {0};
cutlass::Coord<2> bounds = cutlass::make_Coord(M, N);
// Define the TensorRefMapFunc for a column-major interleaved matrix format
typedef cutlass::layout::ColumnMajorInterleaved<kInterleave> TensorRefMapFunc;
// Define a TensorView of rank=2 using the column-major interleaved mapping function
typedef cutlass::TensorView<
int,
TensorRefMapFunc> InterleavedTensorView;
InterleavedTensorView view(
matrix_data,
TensorRefMapFunc::stride(M),
bounds);
// Initialize
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
view.at(cutlass::make_Coord(m, n)) = m + n * M;
}
}
// Print logical view
std::cout << "Column-major, interleave=" << kInterleave << " (logical view):\n";
std::cout << std::setw(4) << view << "\n" << std::endl;
// Now define a linear view of the same data in memory
typedef cutlass::TensorView<int, 2, cutlass::layout::RowMajor> LinearTensorView;
LinearTensorView linear_view(matrix_data, cutlass::make_Coord(N), bounds);
std::cout << "Linear view in memory:\n";
std::cout << std::setw(4) << linear_view << std::endl;
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorView, int4) {
int const M = 4;
int const N = 8;
using T = cutlass::int4b_t;
cutlass::HostTensor<T, cutlass::layout::RowMajor> tensor({M, N});
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
T x = T(n ^ m); // some simple hash
tensor.host_view().at({m, n}) = x;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
int x = (n ^ m); // some simple hash
EXPECT_TRUE(int(tensor.host_view().at({m, n})) == x);
}
}
EXPECT_EQ(tensor.size(), M * N);
}
TEST(TensorView, uint4) {
int const M = 4;
int const N = 8;
using T = cutlass::uint4b_t;
cutlass::HostTensor<T, cutlass::layout::RowMajor> tensor({M, N});
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
T x = T(n ^ m); // some simple hash
tensor.host_view().at({m, n}) = x;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
int x = (n ^ m); // some simple hash
EXPECT_TRUE(int(tensor.host_view().at({m, n})) == x);
}
}
EXPECT_EQ(tensor.size(), M * N);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/tensor_view.cu/0 | {
"file_path": "test/unit/core/tensor_view.cu",
"repo_id": "test",
"token_count": 3276
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/stride.hpp>
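// Informal summary of the behavior exercised below: compact_col_major assigns strides as a
// left-to-right running product of the extents, and a statically known extent of 1 receives
// stride 0. With dynamic extents the running product is carried across size-1 modes instead.
// compact_row_major mirrors this behavior from right to left.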
TEST(CuTe_core, CompactColMajor_Static)
{
using namespace cute;
CUTE_STATIC_ASSERT_V((compact_col_major(Int<1>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(Int<1>{}, Int<3>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(Int<8>{}) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(Int<8>{}, Int<3>{}) == Int<3>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(1) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(8) == Int<1>{}));
{
auto test = make_tuple(Int<4>{}, Int<8>{});
auto result = make_tuple(Int<1>{}, Int<4>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, Int<8>{}, Int< 2>{});
auto result = make_tuple(Int<1>{}, Int<4>{}, Int<32>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, Int<8>{}, Int<1>{}, Int< 2>{});
auto result = make_tuple(Int<1>{}, Int<4>{}, Int<0>{}, Int<32>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(make_tuple(Int<4>{}, Int<8>{}), Int<1>{}, Int< 2>{});
auto result = make_tuple(make_tuple(Int<1>{}, Int<4>{}), Int<0>{}, Int<32>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, make_tuple(Int<8>{}, Int<1>{}, Int< 2>{}));
auto result = make_tuple(Int<1>{}, make_tuple(Int<4>{}, Int<0>{}, Int<32>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, make_tuple(Int<8>{}, Int<1>{}, make_tuple(Int< 2>{}, Int< 3>{})));
auto result = make_tuple(Int<1>{}, make_tuple(Int<4>{}, Int<0>{}, make_tuple(Int<32>{}, Int<64>{})));
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
}
TEST(CuTe_core, CompactColMajor_Dynamic)
{
using namespace cute;
ASSERT_TRUE((compact_col_major(1) == 1));
ASSERT_TRUE((compact_col_major(1, 3) == 3));
ASSERT_TRUE((compact_col_major(8) == 1));
ASSERT_TRUE((compact_col_major(8, 3) == 3));
ASSERT_TRUE((compact_col_major(1) == 1));
ASSERT_TRUE((compact_col_major(8) == 1));
{
auto test = make_tuple(4, 8);
auto result = make_tuple(1, 4);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, 8, 2);
auto result = make_tuple(1, 4, 32);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, 8, 1, 2);
auto result = make_tuple(1, 4, 32, 32);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(make_tuple(4, 8), 1, 2);
auto result = make_tuple(make_tuple(1, 4), 32, 32);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, make_tuple(8, 1, 2));
auto result = make_tuple(1, make_tuple(4, 32, 32));
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, make_tuple(8, 1, make_tuple( 2, 3)));
auto result = make_tuple(1, make_tuple(4, 32, make_tuple(32, 64)));
ASSERT_TRUE((compact_col_major(test) == result));
}
}
TEST(CuTe_core, CompactRowMajor_Static)
{
using namespace cute;
CUTE_STATIC_ASSERT_V((compact_row_major(Int<1>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(Int<1>{}, Int<3>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(Int<8>{}) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(Int<8>{}, Int<3>{}) == Int<3>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(1) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(8) == Int<1>{}));
{
auto test = make_tuple(Int<4>{}, Int<8>{});
auto result = make_tuple(Int<8>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, Int<8>{}, Int<2>{});
auto result = make_tuple(Int<16>{}, Int<2>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, Int<8>{}, Int<1>{}, Int<2>{});
auto result = make_tuple(Int<16>{}, Int<2>{}, Int<0>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(make_tuple(Int< 4>{}, Int<8>{}), Int<1>{}, Int<2>{});
auto result = make_tuple(make_tuple(Int<16>{}, Int<2>{}), Int<0>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, make_tuple(Int<8>{}, Int<1>{}, Int<2>{}));
auto result = make_tuple(Int<16>{}, make_tuple(Int<2>{}, Int<0>{}, Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, make_tuple(Int<8>{}, Int<1>{}, make_tuple(Int<2>{}, Int<3>{})));
auto result = make_tuple(Int<48>{}, make_tuple(Int<6>{}, Int<0>{}, make_tuple(Int<3>{}, Int<1>{})));
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
}
TEST(CuTe_core, CompactRowMajor_Dynamic)
{
using namespace cute;
ASSERT_TRUE((compact_row_major(1) == 1));
ASSERT_TRUE((compact_row_major(1, 3) == 3));
ASSERT_TRUE((compact_row_major(8) == 1));
ASSERT_TRUE((compact_row_major(8, 3) == 3));
ASSERT_TRUE((compact_row_major(1) == 1));
ASSERT_TRUE((compact_row_major(8) == 1));
{
auto test = make_tuple(4, 8);
auto result = make_tuple(8, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, 8, 2);
auto result = make_tuple(16, 2, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, 8, 1, 2);
auto result = make_tuple(16, 2, 2, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple(make_tuple( 4, 8), 1, 2);
auto result = make_tuple(make_tuple(16, 2), 2, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, make_tuple(8, 1, 2));
auto result = make_tuple(16, make_tuple(2, 2, 1));
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, make_tuple(8, 1, make_tuple(2, 3)));
auto result = make_tuple(48, make_tuple(6, 6, make_tuple(3, 1)));
ASSERT_TRUE((compact_row_major(test) == result));
}
}
| test/unit/cute/core/compact_xmajor.cpp/0 | {
"file_path": "test/unit/cute/core/compact_xmajor.cpp",
"repo_id": "test",
"token_count": 3509
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/platform/platform.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Prototype algorithm for partitioning a 4D space across warps to achieve several performance
/// objectives:
///
/// - coalesced memory accesses in units of 128 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
struct OutputTileThreadMapExpr {
struct Shape {
int column;
int row;
int group;
int cluster;
Shape(int col = 1, int r = 1, int g = 1, int c = 1):
column(col), row(r), group(g), cluster(c) { }
};
int const kWarpSize = 32;
int const kMemoryAccessSize = 256; // size in bytes of the preferred memory access size
//
// Data members
//
Shape shape;
Shape count;
int threads;
int warp_count;
int elements_per_access;
int element_size;
Shape iterations;
Shape delta;
Shape warp_partitions;
int access_width_in_vectors;
int access_rows;
//
// Methods
//
OutputTileThreadMapExpr(
Shape shape_,
Shape count_,
int threads_,
int elements_per_access_,
int element_size_
):
shape(shape_),
count(count_),
threads(threads_),
warp_count(threads_ / kWarpSize),
elements_per_access(elements_per_access_),
element_size(element_size_) {
int warps_remaining = warp_count;
// clusters
if (shape.cluster > warp_count) {
iterations.cluster = shape.cluster / warp_count;
delta.cluster = shape.row * count.row * shape.group * count.group * shape.cluster / iterations.cluster;
warps_remaining = 1;
warp_partitions.cluster = warp_count;
}
else {
iterations.cluster = 1;
delta.cluster = 1;
warps_remaining = warp_count / shape.cluster;
warp_partitions.cluster = warps_remaining;
}
// group size
if (shape.group > warps_remaining) {
iterations.group = shape.group / warps_remaining;
delta.group = shape.row * count.row * shape.group / iterations.group;
warps_remaining = 1;
warp_partitions.group = warps_remaining;
}
else {
iterations.group = 1;
delta.group = 1;
warps_remaining = warps_remaining / shape.group;
warp_partitions.group = warps_remaining;
}
// Number of rows in a group
if (shape.row > warps_remaining) {
// We must cover this shape within a warp
int shape_row = shape.row / warps_remaining;
int shape_width_vectors = shape.column / elements_per_access;
    // We would still like to minimize the number of strided increments. We can accomplish this
    // by arranging the memory instructions as 2D accesses that are kMemoryAccessSize bytes wide.
int target_memory_access_width = kMemoryAccessSize / (elements_per_access * element_size / 8);
int target_rows_per_access = kWarpSize / target_memory_access_width;
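      // Worked numbers for the configurations tested below: an 8-element access of 16-bit
      // data is 8 * 16 / 8 = 16 bytes, so the target width is 256 / 16 = 16 accesses and a
      // warp covers 32 / 16 = 2 rows per access; a 4-element access of 32-bit data yields
      // the same 16 x 2 arrangement.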
if (target_rows_per_access > shape_row) {
access_rows = shape_row;
access_width_in_vectors = kWarpSize / access_rows;
}
else {
access_width_in_vectors = cutlass::platform::min(
shape_width_vectors,
cutlass::platform::min(kWarpSize, kMemoryAccessSize / (elements_per_access * element_size / 8)));
access_rows = cutlass::platform::min(shape_row, kWarpSize / access_width_in_vectors);
}
iterations.row = shape_row / access_rows;
delta.row = access_rows;
iterations.column = shape_width_vectors / access_width_in_vectors;
delta.column = access_width_in_vectors * elements_per_access;
warp_partitions.column = 1;
warp_partitions.row = 1;
}
else {
iterations.row = 1;
delta.row = 1;
iterations.column = (shape.column / elements_per_access) / kWarpSize;
delta.column = kWarpSize * elements_per_access;
access_width_in_vectors = kWarpSize;
access_rows = 1;
warp_partitions.row = 1;
warp_partitions.column = warps_remaining;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
std::ostream & operator<<(std::ostream &out, OutputTileThreadMapExpr::Shape const &shape) {
out << "col: " << shape.column << ", r: " << shape.row << ", g: " << shape.group << ", c: " << shape.cluster;
return out;
}
std::ostream & operator<<(std::ostream &out, OutputTileThreadMapExpr const &map) {
out
<< " shape(" << map.shape << ")\n"
<< " count(" << map.count << ")\n"
<< " iterations(" << map.iterations << ")\n"
<< " delta(" << map.delta << ")\n"
<< " warps(" << map.warp_partitions << ")\n"
<< " access(width: " << map.access_width_in_vectors
<< ", rows: " << map.access_rows
<< ") x v" << map.elements_per_access
<< ".b" << map.element_size << "\n";
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape,
typename Count,
int Threads,
int ElementsPerAccess,
int ElementSize
>
struct ThreadMapTestbed {
ThreadMapTestbed() {
OutputTileThreadMapExpr map(
{ Shape::kColumn, Shape::kRow, Shape::kGroup, Shape::kCluster },
{ Count::kColumn, Count::kRow, Count::kGroup, Count::kCluster },
Threads,
ElementsPerAccess,
ElementSize
);
using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
Shape,
Count,
Threads,
ElementsPerAccess,
ElementSize
>;
using CompactThreadmap = typename ThreadMap::CompactedThreadMap;
bool const kVerbose = false;
if (kVerbose) {
std::cout << map << std::endl;
std::cout << "ThreadMap::warps remaining:\n"
<< " for groups: " << ThreadMap::Detail::kWarpsRemainingForGroups << "\n"
<< " for rows: " << ThreadMap::Detail::kWarpsRemainingForRows << "\n";
std::cout << "ThreadMap::Access:\n"
<< " width: " << ThreadMap::Detail::kAccessWidth << "\n"
<< " rows: " << ThreadMap::Detail::kAccessRows << "\n";
std::cout << "ThreadMap::RowArrangement::Iterations:\n"
<< " row: " << int(ThreadMap::Detail::RowArrangement::kIterationsRow) << "\n";
}
EXPECT_EQ(int(ThreadMap::Delta::kCluster), map.delta.cluster);
EXPECT_EQ(int(ThreadMap::Delta::kGroup), map.delta.group);
EXPECT_EQ(int(ThreadMap::Delta::kRow), map.delta.row);
EXPECT_EQ(int(ThreadMap::Delta::kColumn), map.delta.column);
EXPECT_EQ(int(ThreadMap::Iterations::kCluster), map.iterations.cluster);
EXPECT_EQ(int(ThreadMap::Iterations::kGroup), map.iterations.group);
EXPECT_EQ(int(ThreadMap::Iterations::kRow), map.iterations.row);
EXPECT_EQ(int(ThreadMap::Iterations::kColumn), map.iterations.column);
if (kVerbose) {
std::cout << "Iterations(col: " << ThreadMap::Iterations::kColumn
<< ", r: " << ThreadMap::Iterations::kRow
<< ", g: " << ThreadMap::Iterations::kGroup
<< ", c: " << ThreadMap::Iterations::kCluster << ")\n";
std::cout << "Delta(col: " << ThreadMap::Delta::kColumn
<< ", r: " << ThreadMap::Delta::kRow
<< ", g: " << ThreadMap::Delta::kGroup
<< ", c: " << ThreadMap::Delta::kCluster << ")\n";
for (int tid = 0; tid < Threads; ++tid) {
auto output_coord = ThreadMap::initial_offset(tid);
auto source_coord = CompactThreadmap::initial_offset(tid);
std::cout << "T" << tid << " - output: " << output_coord << ", source: " << source_coord << "\n";
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
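// Informal hand-trace of the first case below (Shape = 64x8x1x1, 32 threads, 8-element
// accesses of 16-bit data), following the prototype expression above: a single warp is
// left for the row dimension, each access spans 8 vectors by 4 rows, and the map performs
// 2 row iterations with delta.row = 4 plus one 64-element-wide column iteration.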
TEST(ThreadMap, f16_tensor_op_64x64_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 1>;
int const kThreads = 32;
int const kElementsPerAccess = 8;
int const kElementSize = 16;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f16_tensor_op_128x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 8;
int const kElementSize = 16;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f16_tensor_op_256x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 8, 4, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 8;
int const kElementSize = 16;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f16_tensor_op_128x256_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 8;
int const kElementSize = 16;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f16_tensor_op_128x64_64x32x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 8, 2, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 8;
int const kElementSize = 16;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f16_tensor_op_64x128_128x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 8, 1, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 8;
int const kElementSize = 16;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_tensor_op_64x64_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 1>;
int const kThreads = 32;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_tensor_op_128x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_tensor_op_256x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 8, 4, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_tensor_op_128x256_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_tensor_op_128x64_64x32x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 8, 2, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_tensor_op_64x128_128x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 8, 1, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(ThreadMap, f32_volta_tensor_op_64x64_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 32;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_volta_tensor_op_64x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 64;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_volta_tensor_op_128x64_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 2, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 64;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_volta_tensor_op_128x64_64x32x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 2, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_volta_tensor_op_128x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 2, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_volta_tensor_op_128x256_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<256, 2, 4, 2, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, f32_volta_tensor_op_256x128_64x64x8) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 4, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 4;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(ThreadMap, simt_32x64_32x64x1) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<64, 1, 4, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 32;
int const kElementsPerAccess = 1;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, simt_32x128_32x64x1) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4, 1, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 64;
int const kElementsPerAccess = 1;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, simt_64x128_32x64x1) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4, 2, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 128;
int const kElementsPerAccess = 1;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
TEST(ThreadMap, simt_128x128_32x64x1) {
using Shape = cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4, 4, 1>;
using Count = cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 1>;
int const kThreads = 256;
int const kElementsPerAccess = 1;
int const kElementSize = 32;
ThreadMapTestbed<Shape, Count, kThreads, kElementsPerAccess, kElementSize>();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/output_tile_threadmap.cu/0 | {
"file_path": "test/unit/epilogue/threadblock/output_tile_threadmap.cu",
"repo_id": "test",
"token_count": 6938
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed_grouped.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Visitor class to abstract away the algorithm for iterating over tiles.
//
// This is the prototype. We will delete this when the efficient kernel is
// available.
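// Rough illustration of the schedule implemented below: tiles of all problems are numbered
// consecutively via a running prefix sum of each problem's grid size. Block b starts at
// tile b and advances by gridDim.x on each iteration, so with two problems of 4 and 6 tiles
// and a 4-block grid, block 1 visits tiles 1, 5, and 9, the last two falling in the second
// problem.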
struct GemmGroupedProblemVisitor {
struct Params {
cutlass::gemm::GemmCoord const *problem_sizes;
int32_t problem_count;
int64_t const *tile_count;
};
struct SharedStorage {
//
// Nothing for now. As an optimization step, we could consider parallel
// argmin or prefix sums across the block.
//
};
//
// Data members
//
SharedStorage &shared_storage;
Params const ¶ms;
cutlass::MatrixCoord threadblock_shape;
int64_t tile_idx;
int64_t tile_count_sum;
int64_t problem_tile_start;
int32_t problem_idx;
//
// Methods
//
CUTLASS_DEVICE
GemmGroupedProblemVisitor(
SharedStorage &shared_storage_,
Params const ¶ms_,
cutlass::MatrixCoord threadblock_shape_,
int32_t block_idx
):
shared_storage(shared_storage_),
params(params_),
threadblock_shape(threadblock_shape_),
tile_idx(block_idx),
tile_count_sum(0),
problem_idx(0)
{
cutlass::gemm::GemmCoord problem = params.problem_sizes[problem_idx];
cutlass::gemm::GemmCoord grid = grid_shape(problem);
problem_tile_start = 0;
tile_count_sum = grid.m() * grid.n();
}
/// Get the grid shape
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(
cutlass::gemm::GemmCoord const &problem,
cutlass::MatrixCoord const & block_shape) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + block_shape.row()) / block_shape.row()),
((problem.n() - 1 + block_shape.column()) / block_shape.column()),
1);
}
/// Get the grid shape
CUTLASS_DEVICE
cutlass::gemm::GemmCoord grid_shape(cutlass::gemm::GemmCoord const &problem) const {
return grid_shape(problem, threadblock_shape);
}
/// Returns true if there is a tile to compute
CUTLASS_DEVICE
bool next_tile() {
if (tile_idx < tile_count_sum) {
return true;
}
do {
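      // Walk forward through the problems, extending the running prefix sum of tile counts,
      // until the sum passes this block's tile index or the problems are exhausted.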
++problem_idx;
if (problem_idx >= params.problem_count) {
return false;
}
cutlass::gemm::GemmCoord problem = params.problem_sizes[problem_idx];
cutlass::gemm::GemmCoord grid = grid_shape(problem);
int64_t tile_count = grid.m() * grid.n();
problem_tile_start = tile_count_sum;
tile_count_sum += tile_count;
} while (tile_count_sum <= tile_idx);
return true;
}
/// Gets the global tile index
CUTLASS_HOST_DEVICE
int64_t tile_index() const {
return tile_idx;
}
/// Gets the index of the problem
CUTLASS_HOST_DEVICE
int32_t problem_index() const {
return problem_idx;
}
/// Returns the problem size for the current problem
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord problem_size() const {
return params.problem_sizes[problem_idx];
}
CUTLASS_HOST_DEVICE
int64_t threadblock_idx() const {
return tile_idx - problem_tile_start;
}
CUTLASS_DEVICE
void advance(int32_t grid_size) {
tile_idx += grid_size;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int ThreadblockShapeM, int ThreadblockShapeN>
__global__ void GroupedBatchedKernel(GemmGroupedProblemVisitor::Params params) {
__shared__ GemmGroupedProblemVisitor::SharedStorage shared_storage;
GemmGroupedProblemVisitor problem_visitor(
shared_storage,
params,
{ThreadblockShapeM, ThreadblockShapeN},
blockIdx.x);
while (problem_visitor.next_tile()) {
cutlass::gemm::GemmCoord problem_size = problem_visitor.problem_size();
int64_t threadblock_idx = problem_visitor.threadblock_idx();
cutlass::gemm::GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
int threadblock_tile_m_idx = int(threadblock_idx / grid_shape.n());
int threadblock_tile_n_idx = int(threadblock_idx % grid_shape.n());
//
// Do the MMA
//
if (threadIdx.x == 0) {
#if 0
printf("Block %d - tile: %lld, problem %d, threadblock_idx: %lld, threadblock(m: %d, n: %d)\n",
blockIdx.x,
static_cast<long long>(problem_visitor.tile_index()),
problem_visitor.problem_index(),
threadblock_idx,
threadblock_tile_m_idx,
threadblock_tile_n_idx);
#endif
}
// Next tile
problem_visitor.advance(gridDim.x);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_scheduler, 64x64x32_32x32x32) {
int32_t problem_count = 16;
int const kThreadblockShapeM = 64;
int const kThreadblockShapeN = 64;
std::vector<cutlass::gemm::GemmCoord> problem_sizes(problem_count);
std::vector<int64_t> tile_counts(problem_count);
// construct a few problems of random sizes
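  // Each extent is a multiple of 8 in the range [64, 440].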
srand(1921);
for (int32_t i = 0; i < problem_count; ++i) {
problem_sizes.at(i) = cutlass::gemm::GemmCoord(
8 * (rand() % 48) + 64,
8 * (rand() % 48) + 64,
8 * (rand() % 48) + 64);
}
  // compute an inclusive prefix sum of the per-problem tile counts
int64_t tile_count = 0;
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord grid_shape = GemmGroupedProblemVisitor::grid_shape(
problem_sizes.at(i), {kThreadblockShapeM, kThreadblockShapeN});
int32_t problem_tile_count = (grid_shape.m() * grid_shape.n());
int64_t tile_start = tile_count;
tile_count += problem_tile_count;
tile_counts.at(i) = tile_count;
if (false) {
std::cout << "Problem " << i << " size("
<< problem_sizes.at(i).m() << "-by-" << problem_sizes.at(i).n()
<< ") - tiles: " << problem_tile_count << ", grid(" << grid_shape.m() << ", " << grid_shape.n()
<< "), tiles[" << tile_start << ", " << tile_count << ")" << std::endl;
}
}
// Copy to device memory
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device(problem_count);
cutlass::DeviceAllocation<int64_t> tile_counts_device(problem_count);
problem_sizes_device.copy_from_host(problem_sizes.data());
tile_counts_device.copy_from_host(tile_counts.data());
GemmGroupedProblemVisitor::Params params;
params.problem_sizes = problem_sizes_device.get();
params.problem_count = problem_count;
params.tile_count = tile_counts_device.get();
// Launch the kernel
dim3 grid(108, 1, 1);
dim3 block(128, 1, 1);
GroupedBatchedKernel<kThreadblockShapeM, kThreadblockShapeN><<< grid, block >>>(params);
  // Wait for the kernel to finish
cudaDeviceSynchronize();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32, 128x128x32_64x64x32) {
using ElementOutput = float;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
8,
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
8,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(24);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f16n_f16t_f32t_tensor_op_f32, 128x128x32_64x64x32) {
using ElementOutput = float;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
8,
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
8,
ElementOutput, cutlass::layout::RowMajor, // row major
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(24);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f16t_f16n_f32n_tensor_op_f32, 128x64x32_64x32x32) {
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
8,
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
8,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 64, 32>,
cutlass::gemm::GemmShape<64, 32, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
4>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f16t_f16n_f32t_tensor_op_f32, 128x64x32_64x32x32) {
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
8,
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
8,
ElementOutput, cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 64, 32>,
cutlass::gemm::GemmShape<64, 32, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
4>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f64t_f64t_f64n_tensor_op_f64, 64x64x16_32x32x16) {
using ElementInput = double;
using ElementOutput = double;
using ElementAccumulator = double;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
4>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f32t_f32t_f32n_simt_f32, 128x128x8_64x32x1) {
using ElementInput = float;
using ElementOutput = float;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 8>,
cutlass::gemm::GemmShape<64, 32, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f32t_f32t_f32t_simt_f32, 128x128x8_64x32x1) {
using ElementInput = float;
using ElementOutput = float;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementOutput, cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 8>,
cutlass::gemm::GemmShape<64, 32, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f32t_f32t_f32n_simt_f32, 128x64x8_64x32x1) {
using ElementInput = float;
using ElementOutput = float;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 64, 8>,
cutlass::gemm::GemmShape<64, 32, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_f32t_f32t_f32t_simt_f32, 128x64x8_64x32x1) {
using ElementInput = float;
using ElementOutput = float;
using ElementAccumulator = float;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementOutput, cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassSimt,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 64, 8>,
cutlass::gemm::GemmShape<64, 32, 8>,
cutlass::gemm::GemmShape<1, 1, 1>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_cf32n_cf32n_cf32n_tensorop_f32, 64x64x16_32x32x16) {
using ElementInput = cutlass::complex<float>;
using ElementOutput = cutlass::complex<float>;
using ElementAccumulator = cutlass::complex<float>;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kNone,
1,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3,
cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
cutlass::arch::OpMultiplyAddComplex>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_cf32c_cf32t_cf32n_tensorop_f32, 64x64x16_32x32x16) {
using ElementInput = cutlass::complex<float>;
using ElementOutput = cutlass::complex<float>;
using ElementAccumulator = cutlass::complex<float>;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kConjugate,
1,
ElementInput,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kConjugate,
1,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3,
cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
cutlass::arch::OpMultiplyAddComplex>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_cf32c_cf32t_cf32t_tensorop_f32, 64x64x16_32x32x16) {
using ElementInput = cutlass::complex<float>;
using ElementOutput = cutlass::complex<float>;
using ElementAccumulator = cutlass::complex<float>;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kConjugate,
1,
ElementInput,
cutlass::layout::ColumnMajor,
cutlass::ComplexTransform::kConjugate,
1,
ElementOutput, cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3,
cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
cutlass::arch::OpMultiplyAddComplex>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_Device_GemmGrouped_cf64t_cf64h_cf64n_tensorop_f64, 32x32x16_16x16x16) {
using ElementInput = cutlass::complex<double>;
using ElementOutput = cutlass::complex<double>;
using ElementAccumulator = cutlass::complex<double>;
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped<
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
1,
ElementInput,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kConjugate,
1,
ElementOutput, cutlass::layout::ColumnMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 16, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
3,
cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
cutlass::arch::OpMultiplyAddComplex>::GemmKernel;
using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>;
//
// Test
//
test::gemm::device::TestbedGrouped<Gemm> testbed;
bool passed = testbed.run(27);
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/gemm_grouped_sm80.cu/0 | {
"file_path": "test/unit/gemm/device/gemm_grouped_sm80.cu",
"repo_id": "test",
"token_count": 9790
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
namespace test {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, int InterleavedK>
struct MultistageInterleavedTestbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
//
// Methods
//
MultistageInterleavedTestbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(
view, seed, 2, -2, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerMultiprocessor < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
//
// Allocate the GEMM workspace
//
cutlass::HostTensor<
typename Gemm::ElementA,
typename Gemm::LayoutA> tensor_A(problem_size.mk());
cutlass::HostTensor<
typename Gemm::ElementB,
typename Gemm::LayoutB> tensor_B(problem_size.kn());
cutlass::HostTensor<
typename Gemm::ElementB,
typename Gemm::LayoutB> tensor_B_reordered(problem_size.kn());
cutlass::HostTensor<
typename Gemm::ElementC,
typename Gemm::LayoutC> tensor_C(problem_size.mn());
cutlass::HostTensor<
typename Gemm::ElementC,
typename Gemm::LayoutC> tensor_D(problem_size.mn());
cutlass::HostTensor<
typename Gemm::ElementC,
typename Gemm::LayoutC> reference_D(problem_size.mn(), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
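    // Reorder B into the interleaved arrangement consumed by the device kernel; the host
    // reference GEMM below still reads the original tensor_B.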
cutlass::reorder_column<InterleavedK>(
tensor_B_reordered.host_ref(), tensor_B.host_ref(), problem_size);
cutlass::reference::host::TensorCopy(
reference_D.host_view(),
tensor_C.host_view());
tensor_A.sync_device();
tensor_B_reordered.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B_reordered.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
{alpha, beta}
};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Verify
//
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
reference_D.host_view(),
tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nB_reordered =\n" << tensor_B_reordered.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Runs a set of problem sizes
bool run_all() {
bool passed = true;
int problem_size_m[] = {
InterleavedK, 512 + InterleavedK
};
int problem_size_n[] = {
InterleavedK, 512 + InterleavedK
};
int problem_size_k[] = {
InterleavedK, Gemm::ThreadblockShape::kK * Gemm::kStages + InterleavedK
};
double problem_alpha[] = {
1.0
};
double problem_beta[] = {
0.0
};
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (double alpha : problem_alpha) {
for (double beta : problem_beta) {
passed = run(
{m, n, k},
ElementCompute(alpha),
ElementCompute(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
return true;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/multistage_testbed_interleaved.h/0 | {
"file_path": "test/unit/gemm/device/multistage_testbed_interleaved.h",
"repo_id": "test",
"token_count": 3986
} | 64 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Host reference and operations for Sm90 EVT unit test
*/
#pragma once
#include "gemm_testbed_3x_evt.hpp"
//////////////////////////////////////////////////////////////////////////////
/// Host references used for testing
namespace test::gemm::device {
template<class Gemm, class NodeOp, class ...ChildOp>
using HEVT = HostTreeVisitor<Gemm, NodeOp, ChildOp...>;
template<class Gemm, class EdgeTuple, class ...Ops>
using HDAG = HostTopoVisitor<Gemm, EdgeTuple, Ops...>;
template<class Gemm, class InputTree, class OutputTree, class... AuxOutTrees>
using HST = HostSplitTreeVisitor<Gemm, InputTree, OutputTree, AuxOutTrees...>;
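// HEVT builds a host reference as a tree: NodeOp is applied elementwise to the results of its
// ChildOp operands. HDAG generalizes this to a DAG: EdgeTuple holds one cute::tuple per op listing
// the indices of the ops that feed it (leaves get an empty tuple), mirroring the
// Sm90TopologicalVisitor structure used below. HST evaluates an input tree once and feeds its
// result to the output tree and any auxiliary output trees.
//
// Illustrative sketch (not used by the tests): a host reference for D = alpha * acc would be
//   using EVTModule = HEVT<HostAuxStore<Gemm, true>,
//                          HEVT<HostCompute<Gemm, cutlass::multiplies>,
//                               HostScalarBroadcast<Gemm, 1>,    // alpha
//                               HostAccumulator<Gemm>>>;         // acc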
/// D = alpha * acc + beta * C + AuxLoad
template<class Gemm, class ElementAux, class LayoutAux>
class HostEVTAuxLoad {
public:
using ScalarAlpha = HostScalarBroadcast<Gemm, 1>;
using AccFetchNode = HostAccumulator<Gemm>;
using AuxLoadNode = HostAuxLoad<Gemm, false, ElementAux, LayoutAux>;
using TernaryCompute0 = HEVT<HostCompute<Gemm, cutlass::homogeneous_multiply_add>, ScalarAlpha, AccFetchNode, AuxLoadNode>;
using ScalarBeta = HostScalarBroadcast<Gemm, 1>;
using CLoadNode = HostAuxLoad<Gemm, true>;
using TernaryCompute1 = HEVT<HostCompute<Gemm, cutlass::homogeneous_multiply_add>, ScalarBeta, CLoadNode, TernaryCompute0>;
using EVTModule = HEVT<HostAuxStore<Gemm, true>, TernaryCompute1>;
};
/// D = alpha * acc + beta * C + per-column bias
template<class Gemm, class ElementBias>
class HostPerColBias {
public:
using ScalarAlpha = HostScalarBroadcast<Gemm, 1>;
using AccFetchNode = HostAccumulator<Gemm>;
using RowBroadcastNode = HostRowBroadcast<Gemm, ElementBias>;
using TernaryCompute0 = HEVT<HostCompute<Gemm, cutlass::homogeneous_multiply_add>, ScalarAlpha, AccFetchNode, RowBroadcastNode>;
using ScalarBeta = HostScalarBroadcast<Gemm, 1>;
using CLoadNode = HostAuxLoad<Gemm, true>;
using TernaryCompute1 = HEVT<HostCompute<Gemm, cutlass::homogeneous_multiply_add>, ScalarBeta, CLoadNode, TernaryCompute0>;
using EVTModule = HEVT<HostAuxStore<Gemm, true>, TernaryCompute1>;
};
/// D = beta * C + Graph(relu(alpha * acc + aux) + aux)
/// Testing EVT - DAG structure
template<class Gemm>
class HostEVTDAG {
public:
using ScalarAlpha = HostScalarBroadcast<Gemm, 1>;
using AccFetchNode = HostAccumulator<Gemm>;
using AuxLoadNode = HostAuxLoad<Gemm, false, cutlass::half_t, cutlass::layout::RowMajor>;
using DAGNode = HDAG<
Gemm,
cute::tuple<
cute::tuple<>, // 0. alpha
cute::tuple<>, // 1. acc
cute::tuple<>, // 2. aux load
cute::tuple<cute::_0, cute::_1, cute::_2>, // 3. alpha * acc + aux load
      cute::tuple<cute::_3>,                         // 4. relu(alpha * acc + aux load)
      cute::tuple<cute::_2, cute::_4>                // 5. relu(alpha * acc + aux load) + aux load
>,
ScalarAlpha,
AccFetchNode,
AuxLoadNode,
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostCompute<Gemm, cutlass::epilogue::thread::ReLu>,
HostCompute<Gemm, cutlass::plus>
>;
using ScalarBeta = HostScalarBroadcast<Gemm, 1>;
using CLoadNode = HostAuxLoad<Gemm, true>;
using TernaryCompute1 = HEVT<HostCompute<Gemm, cutlass::homogeneous_multiply_add>, ScalarBeta, CLoadNode, DAGNode>;
using EVTModule = HEVT<HostAuxStore<Gemm, true>, TernaryCompute1>;
};
/// EVT = alpha * acc + C
/// D = Graph(maximum(EVT + per-row bias, EVT))
/// Testing DAG - EVT
template<class Gemm>
class HostDAGEVT {
public:
using EVTNode = HEVT<
HostAuxStore<Gemm, false, cutlass::half_t, cutlass::layout::RowMajor>,
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 2>,
HostAccumulator<Gemm>,
HostAuxLoad<Gemm, true>
>
>;
using EVTModule = HEVT<
HostAuxStore<Gemm, true>,
HDAG<
Gemm,
cute::tuple<
cute::tuple<>, // 0. EVT
cute::tuple<>, // 1. per-row bias
cute::tuple<cute::_0, cute::_1>, // 2. EVT + per-row bias
cute::tuple<cute::_0, cute::_2> // 3. maximum(EVT + per-row bias, EVT)
>,
EVTNode,
HostColBroadcast<Gemm, cutlass::half_t>,
HostCompute<Gemm, cutlass::plus>,
HostCompute<Gemm, cutlass::maximum_with_default_nan_propagation>
>
>;
};
/// Xreduce(alpha * acc + beta * C)
template<class Gemm, template<class, template <class> class, class> class ReduceOp>
class HostReduce {
public:
using ScalarAlpha = HostScalarBroadcast<Gemm, 1>;
using AccFetchNode = HostAccumulator<Gemm>;
using BinaryCompute0 = HEVT<HostCompute<Gemm, cutlass::multiplies>, ScalarAlpha, AccFetchNode>;
using ScalarBeta = HostScalarBroadcast<Gemm, 1>;
using CLoadNode = HostAuxLoad<Gemm, true>;
using TernaryCompute1 = HEVT<HostCompute<Gemm, cutlass::homogeneous_multiply_add>, ScalarBeta, CLoadNode, BinaryCompute0>;
using ReduceNode = HEVT<ReduceOp<Gemm, cutlass::plus, float>, TernaryCompute1>;
using EVTModule = HEVT<HostAuxStore<Gemm, true>, ReduceNode>;
};
// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias
// if D is fp8
// D = scale_d * activation(Z)
// else
// D = activation(Z)
template <class Gemm, template <class> class ActivationFn, class ElementD>
class HostScaledLinCombPerRowBiasEltAct {
public:
using EVTModule = HEVT<
HostAuxStore<Gemm, true>,
HEVT<
      HostCompute<Gemm, cutlass::epilogue::fusion::detail::ScaleOutOp<ElementD>::template Op>, // D = scale_d * activation(Z)
HEVT<
HostCompute<Gemm, ActivationFn>, // activation(Z)
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 1, 2>, // scale_c * beta
HostAuxLoad<Gemm, true>, // C
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 1, 3>, // scale_a * scale_b * alpha
HostAccumulator<Gemm>,
HostColBroadcast<Gemm, ElementD>
>
>
>,
HostScalarBroadcast<Gemm, 1> // scale_d
>
>;
};
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
// if D is fp8
// amax_d = max(abs(elements in activation(Z)))
// D = scale_d * activation(Z)
// else
// D = activation(Z)
// if Aux is fp8
// amax_aux = max(abs(elements in Z))
// Aux = scale_aux * Z
// else
// Aux = Z
template <class Gemm, template <class> class ActivationFn, class ElementD, class ElementAux = ElementD>
class HostScaledLinCombPerRowBiasEltActAmaxAux {
public:
template <typename T>
using amax = cutlass::maximum_absolute_value_reduction<T, true>;
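  // Running max(|x|) reduction used for the amax_d / amax_aux outputs below.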
using EVTModuleAuxFp8 = HEVT<
HostAuxStore<Gemm, true>,
HST<Gemm,
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 1, 2>, // scale_c * beta
HostAuxLoad<Gemm, true>, // C
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 1, 3>, // scale_a * scale_b * alpha
HostAccumulator<Gemm>,
HostColBroadcast<Gemm, ElementD>
>
>,
      // D = scale_d * activation(Z), amax_d = max(abs(elements in activation(Z)))
HEVT<
HostCompute<Gemm, cutlass::epilogue::fusion::detail::ScaleOutOp<ElementD>::template Op>,
HEVT<
HostScalarReduce<Gemm, amax, float>,
HEVT<
            HostCompute<Gemm, ActivationFn>, // activation(Z)
HostAccumulator<Gemm> // Z
>
>,
HostScalarBroadcast<Gemm, 1> // scale_d
>,
      // Aux = scale_aux * Z, amax_aux = max(abs(elements in Z))
HEVT<
HostAuxStore<Gemm, false, ElementAux, cutlass::layout::RowMajor>,
HEVT<
HostCompute<Gemm, cutlass::multiplies>,
HEVT<
HostScalarReduce<Gemm, amax, float>,
HostAccumulator<Gemm>
>,
HostScalarBroadcast<Gemm, 1>
>
>
>
>;
using EVTModuleAuxNotFp8 = HEVT<
    // D = scale_d * activation(Z), amax_d = max(abs(elements in activation(Z)))
HostAuxStore<Gemm, true>,
HEVT<
HostCompute<Gemm, cutlass::epilogue::fusion::detail::ScaleOutOp<ElementD>::template Op>,
HEVT<
HostScalarReduce<Gemm, amax, float>,
HEVT<
          HostCompute<Gemm, ActivationFn>, // activation(Z)
HEVT<
// Aux = Z
HostAuxStore<Gemm, false, ElementAux, cutlass::layout::RowMajor>,
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 1, 2>, // scale_c * beta
HostAuxLoad<Gemm, true>, // C
HEVT<
HostCompute<Gemm, cutlass::homogeneous_multiply_add>,
HostScalarBroadcast<Gemm, 1, 3>, // scale_a * scale_b * alpha
HostAccumulator<Gemm>,
HostColBroadcast<Gemm, ElementD>
>
>
>
>
>,
HostScalarBroadcast<Gemm, 1> // scale_d
>
>;
using EVTModule = cute::conditional_t<cutlass::epilogue::fusion::detail::is_fp8_v<ElementAux>, EVTModuleAuxFp8, EVTModuleAuxNotFp8>;
};
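// Usage sketch: the SM90 EVT unit tests pair one of the host references above with a Gemm whose
// epilogue uses the matching Sm90 fusion callback, then run
//   bool passed = test::gemm::device::TestAllEVT<Gemm, HostReference>(true);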
} // namespace test::gemm::device
//////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue {
namespace fusion {
namespace detail {
template <typename T>
struct maximum_with_default_nan_propagation : maximum<T> {};
} // namespace detail
//////////////////////////////////////////////////////////////////////////////
/// D = alpha * acc + beta * C + AuxLoad
template<
class EpilogueDescriptor,
class AuxLoadDescriptor,
class ElementOutput,
class ElementCompute,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombAuxLoad =
  Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + aux)
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementOutput>, // C
    Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + aux
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch, // acc
Sm90AuxLoad<
AuxLoadDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxLoadDescriptor::Element,
typename AuxLoadDescriptor::Stride, typename AuxLoadDescriptor::SmemLayoutAtom,
typename AuxLoadDescriptor::CopyOpS2R // aux load
>
>
>;
//////////////////////////////////////////////////////////////////////////////
/// Example DAG
/// beta * C + Graph(relu(alpha * acc + aux) + aux)
template<
typename EpilogueDescriptor,
typename AuxLoadDescriptor,
class ElementOutput,
class ElementCompute,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombEVTDAG =
  Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (relu(alpha * acc + aux) + aux)
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementOutput>, // C
Sm90TopologicalVisitor<
ElementCompute,
cute::tuple<
cute::seq<>, // 0. alpha
cute::seq<>, // 1. acc
cute::seq<>, // 2. aux load
cute::seq<1, 0, 2>, // 3. alpha * acc + aux load
      cute::seq<3>,             // 4. relu(alpha * acc + aux load)
      cute::seq<2, 4>           // 5. relu(alpha * acc + aux load) + aux load
>,
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch, // acc
Sm90AuxLoad<
AuxLoadDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxLoadDescriptor::Element, typename AuxLoadDescriptor::Stride,
typename AuxLoadDescriptor::SmemLayoutAtom, typename AuxLoadDescriptor::CopyOpS2R>,
Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>,
Sm90Compute<cutlass::epilogue::thread::ReLu, ElementCompute, ElementCompute, RoundStyle>,
Sm90Compute<plus, ElementCompute, ElementCompute, RoundStyle>
>
>;
//////////////////////////////////////////////////////////////////////////////
/// Example DAG
/// EVT = alpha * acc + C
/// D = Graph(maximum(EVT + per-row bias, EVT))
template<
class EpilogueDescriptor,
class AuxStoreDescriptor,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombDAGEVT =
Sm90TopologicalVisitor<
ElementCompute,
cute::tuple<
cute::seq<>,
cute::seq<>,
cute::seq<1, 0>,
cute::seq<0, 2>
>,
Sm90EVT<
Sm90AuxStore<
AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle, typename AuxStoreDescriptor::Stride,
typename AuxStoreDescriptor::SmemLayoutAtom, typename AuxStoreDescriptor::CopyOpR2S>,
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>,
Sm90ScalarBroadcast<ElementScalar>,
Sm90AccFetch,
Sm90SrcFetch<ElementOutput>
>
>,
Sm90ColBroadcast<0, typename EpilogueDescriptor::TileShape, ElementBias>,
Sm90Compute<plus, ElementCompute, ElementCompute, RoundStyle>,
Sm90Compute<detail::maximum_with_default_nan_propagation, ElementOutput, ElementCompute, RoundStyle>
>;
//////////////////////////////////////////////////////////////////////////////
/// D = alpha * acc + beta * C + per-column bias
template<
class EpilogueDescriptor,
class ElementOutput,
class ElementCompute,
class ElementBias = ElementOutput,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombPerColumnBias =
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias)
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementOutput>, // C
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch, // acc
Sm90RowBroadcast<
ceil_div(
EpilogueDescriptor::StagesC,
size(shape_div(take<0, 2>(typename EpilogueDescriptor::TileShape{}), typename EpilogueDescriptor::EpilogueTile{}))
) + 1,
typename EpilogueDescriptor::TileShape,
ElementBias
>
>
>;
//////////////////////////////////////////////////////////////////////////////
/// D = per-column reduce(alpha * acc + beta * C)
template<
template <class> class RegReduceFn,
template <class> class GmemReduceFn,
class ElementReduce,
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombPerColumnReduce =
Sm90EVT<Sm90RowReduction<RegReduceFn, RegReduceFn, GmemReduceFn, 0, CtaTileShapeMNK, ElementReduce, ElementCompute, RoundStyle>, // per column reduce
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementOutput>, // C
Sm90EVT<Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch // acc
>
>
>;
//////////////////////////////////////////////////////////////////////////////
/// D = per-row reduce(alpha * acc + beta * C)
template<
template <class> class RegReduceFn,
template <class> class GmemReduceFn,
class ElementReduce,
class CtaTileShapeMNK,
class ElementOutput,
class ElementCompute,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombPerRowReduce =
  Sm90EVT<Sm90ColReduction<RegReduceFn, RegReduceFn, GmemReduceFn, 0, CtaTileShapeMNK, ElementReduce, ElementCompute, RoundStyle>, // per row reduce
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementOutput>, // C
Sm90EVT<Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch // acc
>
>
>;
//////////////////////////////////////////////////////////////////////////////
/// D = scalar reduce(alpha * acc + beta * C)
template<
template <class> class RegReduceFn,
template <class> class GmemReduceFn,
class ElementReduce,
class ElementOutput,
class ElementCompute,
class ElementScalar = ElementCompute,
FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest
>
using Sm90LinCombScalarReduce =
  Sm90EVT<Sm90ScalarReduction<RegReduceFn, GmemReduceFn, ElementReduce, ElementCompute, RoundStyle>, // scalar reduce
Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // beta
Sm90SrcFetch<ElementOutput>, // C
Sm90EVT<Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc
Sm90ScalarBroadcast<ElementScalar>, // alpha
Sm90AccFetch // acc
>
>
>;
} // namespace fusion
} // namespace cutlass::epilogue
| test/unit/gemm/device/sm90_evt_operations.hpp/0 | {
"file_path": "test/unit/gemm/device/sm90_evt_operations.hpp",
"repo_id": "test",
"token_count": 7660
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface with bias and elementwise epilogues.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_ReLU) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombEltAct<
cutlass::epilogue::thread::ReLu, cutlass::half_t, float>;
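  // LinCombEltAct computes D = ReLU(alpha * acc + beta * C); no bias or aux tensor is stored.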
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
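  // StageCountAutoCarveout subtracts the epilogue's SharedStorage from the shared-memory budget
  // when the builder picks the mainloop stage count, so mainloop and epilogue fit together.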
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAll<Gemm, cutlass::epilogue::thread::ReLu>(1, 1);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU_Legacy) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations" // Suppress deprecation warnings
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4996 )
#endif // _MSC_VER
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
static constexpr bool StoreT = true;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperativeBiasElementwise<
cutlass::epilogue::thread::ReLu, cutlass::half_t, cutlass::plus, StoreT, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>(1, 1);
EXPECT_TRUE(passed);
#ifdef _MSC_VER
#pragma warning( pop )
#endif // _MSC_VER
#pragma GCC diagnostic pop // Re-enable deprecation warnings
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>(1, 1);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_GELU) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::GELU, cutlass::half_t, float, cutlass::half_t, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using namespace test::gemm::device;
bool passed = TestAllBiasElementwise<Gemm>(1, 1, CheckEquality::RELATIVE);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU_NoStoreT) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltAct<
cutlass::epilogue::thread::ReLu, cutlass::half_t, float, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>(1, 1);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_Negate) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::negate, cutlass::half_t, float, cutlass::half_t, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>(1, 1);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::half_t, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>(1, 1);
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF32_ReLU_VoidC) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float, void>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
void, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasF16_ReLU_VoidC) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, cutlass::half_t, void>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
void, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_BiasS8_ReLU_VoidC_U1Aux) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
// ReLU with uint1b_t aux will compute dReLU/dZ as the aux output, i.e. Aux(i) = (Z(i) >= 0) ? 1 : 0
using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux<
LayoutC, cutlass::epilogue::thread::ReLU, cutlass::half_t, float, cutlass::uint1b_t, int8_t, void>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
void, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_dReLU_dBias_VoidC) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombDeEltActDePerRowBias<
LayoutC, cutlass::epilogue::thread::dReLU, cutlass::half_t, float, cutlass::uint1b_t, float, void>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
void, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool passed = test::gemm::device::TestAllBiasElementwise<Gemm>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_dGELU_VoidC) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using FusionOperation = cutlass::epilogue::fusion::LinCombDeEltAct<
LayoutC, cutlass::epilogue::thread::dGELU, cutlass::half_t, float, cutlass::half_t, void>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
void, LayoutC, 8,
cutlass::half_t, LayoutC, 8,
EpilogueSchedule,
FusionOperation
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using namespace test::gemm::device;
bool passed = TestAllBiasElementwise<Gemm>(1.0, 0.0, CheckEquality::RELATIVE);
EXPECT_TRUE(passed);
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_bias_elementwise.cu",
"repo_id": "test",
"token_count": 9605
} | 66 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for Sm90 f8_f8_bf16 with EVT epilogue
ScaledLinCombPerRowBiasEltAct and ScaledLinCombPerRowBiasEltActAmaxAux
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_evt.hpp"
#include "sm90_evt_operations.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
// Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias
// if D is fp8
// D = scale_d * activation(Z)
// else
// D = activation(Z)
TEST(SM90_Device_Gemm_e4m3t_e4m3n_bf16t_tensor_op_gmma_f32_epilogue, 64x128x128_ScaledLinCombPerRowBiasEltAct) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_64,_128,_128>;
using ClusterShape_MNK = Shape<_1,_1,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltAct<
TileShape_MNK, // CtaTileShapeMNK
cutlass::epilogue::thread::ReLu, // ActivationFn
cutlass::bfloat16_t, // ElementOutput
float, // ElementCompute
cutlass::bfloat16_t // ElementBias
>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::bfloat16_t, LayoutC, 8,
cutlass::bfloat16_t, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltAct<
Gemm, cutlass::epilogue::thread::ReLu, cutlass::bfloat16_t
>;
bool passed = test::gemm::device::TestAllEVT<Gemm, HostReference>(true);
EXPECT_TRUE(passed);
}
// Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias
// if D is fp8
// amax_d = max(abs(elements in activation(Z)))
// D = scale_d * activation(Z)
// else
// D = activation(Z)
// if Aux is fp8
// amax_aux = max(abs(elements in Z))
// Aux = scale_aux * Z
// else
// Aux = Z
TEST(SM90_Device_Gemm_e4m3t_e4m3n_bf16n_tensor_op_gmma_f32_epilogue, 64x128x128_4x1x1_ScaledLinCombPerRowBiasEltActAmaxAux) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_128>;
using ClusterShape_MNK = Shape<_2,_4,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecialized;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::bfloat16_t, cutlass::bfloat16_t, EpilogueSchedule>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::bfloat16_t>;
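  // The descriptors compute the epilogue tile, stage counts, and the aux tensor's stride,
  // smem layout atom, and R2S copy op that parameterize the fusion callback below.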
using FusionCallbacks = cutlass::epilogue::fusion::Sm90ScaledLinCombPerRowBiasEltActAmaxAux<
TileShape_MNK, // CtaTileShapeMNK
typename EpilogueDescriptor::EpilogueTile, // EpilogueTile
EpilogueDescriptor::StagesD, // StagesD
typename AuxStoreDescriptor::Stride, // StrideAux
typename AuxStoreDescriptor::SmemLayoutAtom, // SmemLayoutAtom
typename AuxStoreDescriptor::CopyOpR2S, // CopyOpR2S
cutlass::epilogue::thread::ReLu, // ActivationFn
cutlass::bfloat16_t, // ElementOutput
float, // ElementCompute
cutlass::bfloat16_t, // ElementBias
float // ElementScalar
>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cutlass::bfloat16_t, LayoutC, 16,
cutlass::bfloat16_t, LayoutC, 16,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::float_e4m3_t, LayoutA, 16,
cutlass::float_e4m3_t, LayoutB, 16,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
// Host reference
using HostReference = test::gemm::device::HostScaledLinCombPerRowBiasEltActAmaxAux<
Gemm, cutlass::epilogue::thread::ReLu, cutlass::bfloat16_t
>;
bool passed = test::gemm::device::TestAllEVT<Gemm, HostReference>(true);
EXPECT_TRUE(passed);
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f8_f8_bf16_tensor_op_fp32_evt.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f8_f8_bf16_tensor_op_fp32_evt.cu",
"repo_id": "test",
"token_count": 3315
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "testbed.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct TestbedSplitK : public Testbed<Gemm> {
using Base = Testbed<Gemm>;
using ElementCompute = typename Base::ElementCompute;
//
// Methods
//
TestbedSplitK(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
Base(init_A_, init_B_, init_C_, seed_) { }
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
this->tensor_A.device_ref(),
this->tensor_B.device_ref(),
this->tensor_C.device_ref(),
this->tensor_D.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Verify
//
return this->verify(problem_size, alpha, beta);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestAllGemmSplitK() {
bool passed = true;
cutlass::gemm::GemmCoord problem_sizes[] = {
{8, 8, 2048},
{8, 8, 2056},
{264, 72, 520},
{264, 520, 120},
{264, 520, 264}
};
int split_k_slices[] = {
1, 2, 4, 5, 7
};
double problem_alpha[] = {
0.5
};
double problem_beta[] = {
2.0
};
using Testbed = TestbedSplitK<Gemm>;
using ElementCompute = typename Testbed::ElementCompute;
Testbed testbed;
for (auto problem_size : problem_sizes) {
for (int split_k_count : split_k_slices) {
for (double alpha : problem_alpha) {
for (double beta : problem_beta) {
passed = testbed.run(
problem_size,
split_k_count,
ElementCompute(alpha),
ElementCompute(beta)
);
if (!passed) {
std::cout << "Failed on size " << problem_size << " with split_k_count " << split_k_count << std::endl;
return false;
}
}
}
}
}
EXPECT_TRUE(passed);
return passed;
}
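
// Typical usage from a unit test (illustrative sketch; GemmSplitK stands for any device-level
// GEMM configured for serial split-K):
//
//   TEST(SM80_Device_GemmSplitK_example, 128x128x32) {
//     EXPECT_TRUE(test::gemm::device::TestAllGemmSplitK<GemmSplitK>());
//   }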
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_splitk.h/0 | {
"file_path": "test/unit/gemm/device/testbed_splitk.h",
"repo_id": "test",
"token_count": 2088
} | 68 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cute/tensor.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
namespace nvrtc {
namespace thread {
template<
typename ElementA, typename ElementB, typename ElementC,
typename TileShape, typename ClusterShape,
bool kTransA, bool kTransB,
int RANK_M, int RANK_N, int RANK_K, int RANK_L
>
struct ContractionKernel {
using ElementScalar = float;
using ElementAccum = float;
using EpilogueThread = cutlass::epilogue::thread::LinearCombination<ElementC,
1,
ElementAccum,
ElementScalar>;
static constexpr cute::GMMA::Major majorA = ! kTransA ? cute::GMMA::Major::MN : cute::GMMA::Major::K;
static constexpr cute::GMMA::Major majorB = ! kTransB ? cute::GMMA::Major::K : cute::GMMA::Major::MN;
/// Kernel config
typedef int64_t stride_type;
typedef int32_t extent_type;
static constexpr const stride_type* stride_null = nullptr;
static constexpr const extent_type* extent_null = nullptr;
template <int Rank, bool IsMajor, class Indexable>
static constexpr
auto
make_stride_tuple(Indexable const& t, int n, int64_t init_default = 0) {
static_assert(Rank > 1);
if constexpr (IsMajor) {
return cute::transform(cute::make_seq<Rank>{}, [&](auto i) {
if constexpr (i == 0) {
return cute::Int<1>{};
}
else {
return i < n ? t[i] : init_default;
}
});
}
else {
return cute::make_int_tuple<Rank>(t, n, init_default);
}
}
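  // For the major mode the leading stride is the compile-time constant cute::_1; the
  // remaining modes are read from the runtime array (up to n) and padded with init_default.
  // E.g. Rank = 3, IsMajor = true, n = 3, t = {s0, s1, s2} yields the tuple (_1, s1, s2),
  // while the non-major branch simply forwards to cute::make_int_tuple.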
using StrideA = decltype(cute::make_stride(
make_stride_tuple<RANK_M, majorA == cute::GMMA::Major::MN>(stride_null, 0, 0),
make_stride_tuple<RANK_K, majorA == cute::GMMA::Major::K>(stride_null, 0, 0),
cute::make_int_tuple<RANK_L>(stride_null, 0, 0)));
using StrideB = decltype(cute::make_stride(
make_stride_tuple<RANK_N, majorB == cute::GMMA::Major::MN>(stride_null, 0, 0),
make_stride_tuple<RANK_K, majorB == cute::GMMA::Major::K>(stride_null, 0, 0),
cute::make_int_tuple<RANK_L>(stride_null, 0, 0)));
using StrideC = decltype(cute::make_stride(
cute::make_int_tuple<RANK_M>(stride_null, 0, 0),
cute::make_int_tuple<RANK_N>(stride_null, 0, 0),
cute::make_int_tuple<RANK_L>(stride_null, 0, 0)));
using ProblemShape = decltype(cute::make_shape(
cute::make_int_tuple<RANK_M>(extent_null, 0, 0),
cute::make_int_tuple<RANK_N>(extent_null, 0, 0),
cute::make_int_tuple<RANK_K>(extent_null, 0, 0),
cute::make_int_tuple<RANK_L>(extent_null, 0, 0)));
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, StrideA, 16 / sizeof(ElementA),
ElementB, StrideB, 16 / sizeof(ElementB),
ElementAccum,
TileShape, ClusterShape, cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using EpilogueOutputOp = cutlass::epilogue::collective::DefaultEpilogue<StrideC, StrideC, EpilogueThread, cutlass::gemm::EpilogueDefault>;
using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<EpilogueOutputOp>;
using Kernel = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
CollectiveOp,
CollectiveEpilogue>;
};
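// Illustrative instantiation (the concrete tile shape, cluster shape, and ranks below are
// placeholders chosen for the example, not values required by this header):
//
//   using Contraction = nvrtc::thread::ContractionKernel<
//       cutlass::half_t, cutlass::half_t, cutlass::half_t,
//       cute::Shape<cute::_128, cute::_128, cute::_64>,   // TileShape
//       cute::Shape<cute::_1, cute::_2, cute::_1>,        // ClusterShape
//       false, false,                                     // kTransA, kTransB
//       2, 2, 2, 1>;                                      // RANK_M, RANK_N, RANK_K, RANK_L
//   using Kernel = Contraction::Kernel;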
} // namespace thread
} // namespace nvrtc
| test/unit/nvrtc/kernel/thread/contraction.hpp/0 | {
"file_path": "test/unit/nvrtc/kernel/thread/contraction.hpp",
"repo_id": "test",
"token_count": 1978
} | 69 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Common Testbed file shared by Pipeline unit tests
*/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <array>
#include <iostream>
#include <cutlass/gemm/gemm.h>
#include "cutlass/util/command_line.h"
#include "../common/cutlass_unit_test.h"
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
#define CUTLASS_UNIT_TEST_PIPELINE true
#else
#define CUTLASS_UNIT_TEST_PIPELINE false
#endif
// Command line test options
struct Options {
//
// Data Members
//
bool help;
bool verification_enabled;
int SM_count;
int clock_MHz;
//
// Methods
//
Options():
help(false),
verification_enabled(true),
SM_count(116),
clock_MHz(1477)
{ }
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("verification-enabled", verification_enabled, true);
cmd.get_cmd_line_argument("sm-count", SM_count, 116);
cmd.get_cmd_line_argument("clock", clock_MHz, 1477);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --verification-enabled=<bool> Enable/Disable verification\n"
<< " --sm-count=<int> Number of SMs on the chip\n"
<< " --clock=<int> Locked clock value in Mhz\n";
return out;
}
};
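//
// Example command line (illustrative; the test binary name depends on the build tree):
//
//   ./cutlass_test_unit_pipeline --sm-count=108 --clock=1410 --verification-enabled=true
//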
//
// Testbed
//
template<typename Pipeline>
struct Testbed {
private:
// Commandline options
Options options;
void run_test(uint32_t const kNumIters) {
    // Run the pipeline for the requested number of iterations
Pipeline pipeline;
    cudaError_t result = pipeline.run(kNumIters);
    EXPECT_EQ(result, cudaSuccess);
    CUTE_CHECK_LAST();
}
public:
Testbed(Options const &options_) : options(options_) {
int device_id = 0;
cudaDeviceProp device_prop;
CUTE_CHECK_ERROR(cudaSetDevice(device_id));
CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id));
if (device_prop.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
}
/// Run verification Gemm problem sizes
bool verification() {
std::array<uint32_t, 5> kNumIters;
for (size_t i = 0; i < kNumIters.size(); ++i) {
kNumIters[i] = static_cast<uint32_t>( (rand() % 1000) + 1 );
}
    for (uint32_t n : kNumIters) {
std::cout << "Stages = " << Pipeline::Stages << " kNumIters = " << n << "\n";
run_test(n);
}
return true;
}
};
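//
// Illustrative usage from an individual pipeline unit test (PipelineHarness stands in for
// whatever kernel-launching wrapper type that test defines; it is not declared here):
//
//   Options options;
//   options.parse(argc, argv);
//   Testbed<PipelineHarness> testbed(options);
//   EXPECT_TRUE(testbed.verification());
//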
| test/unit/pipeline/testbed.h/0 | {
"file_path": "test/unit/pipeline/testbed.h",
"repo_id": "test",
"token_count": 1513
} | 70 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests cutlass::transform::threadblock::PredicatedTileIterator
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace transform {
namespace threadblock {
namespace kernel {
/// Copy with an iterator
template <typename Iterator>
__global__ void copy(
typename Iterator::Params dst_params,
typename Iterator::Element *dst_pointer,
typename Iterator::Params src_params,
typename Iterator::Element *src_pointer,
cutlass::Coord<2> extent) {
Iterator dst_iterator(dst_params, dst_pointer, extent, threadIdx.x);
Iterator src_iterator(src_params, src_pointer, extent, threadIdx.x);
int iterations = (extent[1] + Iterator::Shape::kStrided - 1) / Iterator::Shape::kStrided;
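  // The tile count along the strided dimension is rounded up; the predicated iterators
  // mask off accesses that fall outside `extent`, so the fragment is zeroed first and the
  // initial load/store pair is peeled out of the loop below.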
typename Iterator::Fragment frag;
for(size_t i = 0; i < frag.size(); i++)
frag[i] = 0;
src_iterator.load(frag);
dst_iterator.store(frag);
++dst_iterator;
++src_iterator;
for (; iterations > 1; --iterations) {
src_iterator.load(frag);
dst_iterator.store(frag);
++dst_iterator;
++src_iterator;
}
}
} // namespace kernel
} // namespace threadblock
} // namespace transform
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined) {
using Shape = cutlass::layout::PitchLinearShape<64, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<Shape, kThreads>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(57, 35);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 35);
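  // The copy extent (57 x 35) is narrower than the allocation (64 x 35) in the contiguous
  // dimension, so the iterator must predicate away the last 7 elements of each strided
  // slice; those destination elements are expected to retain oob_value after the kernel.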
cutlass::HostTensor<int, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_128x4) {
using Shape = cutlass::layout::PitchLinearShape<128, 4>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, false
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(128, 4);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(128, 4);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_128x64) {
using Shape = cutlass::layout::PitchLinearShape<128, 64>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(128, 64);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(128, 64);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x64) {
using Shape = cutlass::layout::PitchLinearShape<64, 64>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 64);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 64);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x8) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(32, 8);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 8);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]; ++s) {
for (int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if (c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x32_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 32);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 32);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_64x29_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(64, 29);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(64, 29);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_120x4_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<128, 4>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(120, 4);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(120, 4);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Transform_threadblock_PredicatedTileIterator, PitchLinear_Stripmined_2dtile_48x29_transpose4x4) {
using Shape = cutlass::layout::PitchLinearShape<64, 8>;
using ThreadTileShape = cutlass::layout::PitchLinearShape<4, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int8_t;
static int const kThreads = 32;
using ThreadMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<Shape, kThreads, ThreadTileShape>;
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
Shape, Element, Layout, 1, ThreadMap, true
>;
cutlass::Coord<2> copy_extent = cutlass::make_Coord(48, 29);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(48, 29);
cutlass::HostTensor<int8_t, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<int8_t, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
uint64_t seed = 7;
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
cutlass::reference::host::TensorFillRandomUniform(src_tensor.host_view(), seed, 8, -8, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
test::transform::threadblock::kernel::copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " - CUDA error: " << cudaGetErrorString(result);
dst_tensor.sync_host();
for (int s = 0; s < alloc_extent[1]/4; ++s) {
for (int c = 0; c < alloc_extent[0]/4; ++c) {
for (int s1 = 0; s1 < 4; s1++){
for(int c1 = 0; c1 < 4; c1++){
Element expected = Element(0);
int l_c = c * 4 + c1;
int l_s = s * 4 + s1;
int l_tc = c * 4 + s1;
int l_ts = s * 4 + c1;
if (l_c < copy_extent[0] && l_s < copy_extent[1]) {
expected = src_tensor.at({l_c, l_s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({l_tc, l_ts});
bool equal = (expected == got);
EXPECT_EQ(expected, got)
<< "Source:\n" << src_tensor.host_view() << "\n\n"
<< "Destination:\n" << dst_tensor.host_view() << "\n";
if (!equal) {
return;
}
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/transform/threadblock/predicated_tile_iterator.cu/0 | {
"file_path": "test/unit/transform/threadblock/predicated_tile_iterator.cu",
"repo_id": "test",
"token_count": 9998
} | 71 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Utilities accompanying the CUTLASS library for interacting with Library types.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Lexical cast from string
template <typename T> T from_string(std::string const &);
/// Converts a Provider enumerant to a string
char const *to_string(Provider provider, bool pretty = false);
/// Parses a Provider enumerant from a string
template <> Provider from_string<Provider>(std::string const &str);
/// Converts a GemmKind enumerant to a string
char const *to_string(GemmKind type, bool pretty = false);
/// Converts a RankKKind enumerant to a string
char const *to_string(RankKKind type, bool pretty = false);
/// Converts a TrmmKind enumerant to a string
char const *to_string(TrmmKind type, bool pretty = false);
/// Converts a SymmKind enumerant to a string
char const *to_string(SymmKind type, bool pretty = false);
/// Converts a SideMode enumerant to a string
char const *to_string(SideMode type, bool pretty = false);
/// Converts a FillMode enumerant to a string
char const *to_string(FillMode type, bool pretty = false);
/// Converts a BlasMode enumerant to a string
char const *to_string(BlasMode type, bool pretty = false);
/// Converts a DiagType enumerant to a string
char const *to_string(DiagType type, bool pretty = false);
/// Converts an OperationKind enumerant to a string
char const *to_string(OperationKind type, bool pretty = false);
/// Parses an OperationKind enumerant from a string
template <> OperationKind from_string<OperationKind>(std::string const &str);
/// Converts a NumericType enumerant to a string
char const *to_string(NumericTypeID type, bool pretty = false);
/// Parses a NumericType enumerant from a string
template <> NumericTypeID from_string<NumericTypeID>(std::string const &str);
/// Returns the size of a data type in bits
int sizeof_bits(NumericTypeID type);
/// Returns true if the numeric type is a complex data type or false if real-valued.
bool is_complex_type(NumericTypeID type);
/// Returns the real-valued type underlying a type (only different from 'type' if complex)
NumericTypeID get_real_type(NumericTypeID type);
/// Returns true if numeric type is integer
bool is_integer_type(NumericTypeID type);
/// Returns true if numeric type is signed
bool is_signed_type(NumericTypeID type);
/// Returns true if numeric type is a signed integer
bool is_signed_integer(NumericTypeID type);
/// Returns true if numeric type is an unsigned integer
bool is_unsigned_integer(NumericTypeID type);
/// Returns true if numeric type is floating-point type
bool is_float_type(NumericTypeID type);
/// To string method for cutlass::Status
char const *to_string(Status status, bool pretty = false);
/// Converts a LayoutTypeID enumerant to a string
char const *to_string(LayoutTypeID layout, bool pretty = false);
/// Parses a LayoutType enumerant from a string
template <> LayoutTypeID from_string<LayoutTypeID>(std::string const &str);
/// Returns the rank of a layout's stride based on the LayoutTypeID
int get_layout_stride_rank(LayoutTypeID layout_id);
/// Converts an OpcodeClassID enumerant to a string
char const *to_string(OpcodeClassID type, bool pretty = false);
/// Converts an OpcodeClassID enumerant from a string
template <>
OpcodeClassID from_string<OpcodeClassID>(std::string const &str);
/// Converts a ComplexTransform enumerant to a string
char const *to_string(ComplexTransform type, bool pretty = false);
/// Converts a ComplexTransform enumerant from a string
template <>
ComplexTransform from_string<ComplexTransform>(std::string const &str);
/// Converts a SplitKMode enumerant to a string
char const *to_string(SplitKMode split_k_mode, bool pretty = false);
/// Converts a SplitKMode enumerant from a string
template <>
SplitKMode from_string<SplitKMode>(std::string const &str);
/// Converts a ConvModeID enumerant to a string
char const *to_string(ConvModeID type, bool pretty = false);
/// Converts a ConvModeID enumerant from a string
template <>
ConvModeID from_string<ConvModeID>(std::string const &str);
/// Converts an IteratorAlgorithmID enumerant to a string
char const *to_string(IteratorAlgorithmID type, bool pretty = false);
/// Converts an IteratorAlgorithmID enumerant from a string
template <>
IteratorAlgorithmID from_string<IteratorAlgorithmID>(std::string const &str);
/// Converts a ConvKind enumerant to a string
char const *to_string(ConvKind type, bool pretty = false);
/// Converts a ConvKind enumerant from a string
template <>
ConvKind from_string<ConvKind>(std::string const &str);
/// Converts a RasterOrder enumerant to a string
char const *to_string(RasterOrder type, bool pretty = false);
/// Converts a RasterOrder enumerant from a string
template<>
RasterOrder from_string<RasterOrder>(std::string const &str);
/// Lexical cast from int64_t to string
std::string lexical_cast(int64_t int_value);
/// Lexical cast a string to a byte array. Returns true if cast is successful or false if invalid.
bool lexical_cast(std::vector<uint8_t> &bytes, NumericTypeID type, std::string const &str);
/// Lexical cast TO a string FROM a byte array. Returns true if cast is successful or false if invalid.
std::string lexical_cast(std::vector<uint8_t> &bytes, NumericTypeID type);
/// Casts from a signed int64 to the destination type. Returns true if successful.
bool cast_from_int64(std::vector<uint8_t> &bytes, NumericTypeID type, int64_t src);
/// Casts from an unsigned int64 to the destination type. Returns true if successful.
bool cast_from_uint64(std::vector<uint8_t> &bytes, NumericTypeID type, uint64_t src);
/// Casts from a real value represented as a double to the destination type. Returns true if successful.
bool cast_from_double(std::vector<uint8_t> &bytes, NumericTypeID type, double src);
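//
// Illustrative use of the helpers above (NumericTypeID::kF32 is one of the enumerants
// declared in library.h; the exact strings produced by to_string() may differ):
//
//   using namespace cutlass::library;
//   char const *name = to_string(NumericTypeID::kF32);      // e.g. "f32"
//   NumericTypeID id = from_string<NumericTypeID>("f32");
//   std::vector<uint8_t> bytes;
//   bool ok = cast_from_double(bytes, NumericTypeID::kF32, 1.5);
//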
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/include/cutlass/library/util.h/0 | {
"file_path": "tools/library/include/cutlass/library/util.h",
"repo_id": "tools",
"token_count": 2185
} | 72 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
  \brief Instantiates 3-D convolution reference operations in the CUTLASS Library manifest.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "conv_reference_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_conv3d_reference_operations(Manifest &manifest) {
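  // Each call below registers host- and device-side reference operations for one
  // combination of element types and NDHWC layouts. As the helper names suggest,
  // make_conv_all covers all convolution operator kinds, while make_conv_fprop (used
  // here for the integer and sub-byte types) registers only the forward-propagation kind.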
make_conv_all<
3,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t,
cutlass::half_t
>(manifest);
make_conv_all<
3,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::half_t, cutlass::layout::TensorNDHWC,
cutlass::half_t, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_all<
3,
float, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float, cutlass::layout::TensorNDHWC,
float,
float
>(manifest);
make_conv_fprop<
3,
int8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
int8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
3,
uint8_t, cutlass::layout::TensorNDHWC,
uint8_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
uint8_t, cutlass::layout::TensorNDHWC,
uint8_t, cutlass::layout::TensorNDHWC,
int8_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
3,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
cutlass::int4b_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<cutlass::int4b_t, float>
>(manifest);
make_conv_fprop<
3,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
int32_t, cutlass::layout::TensorNDHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
3,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
cutlass::uint4b_t, cutlass::layout::TensorNDHWC,
float,
int32_t,
NumericConverterClamp<cutlass::uint4b_t, float>
>(manifest);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/conv3d.cu/0 | {
"file_path": "tools/library/src/reference/conv3d.cu",
"repo_id": "tools",
"token_count": 2382
} | 73 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines reference operations for GEMM operation kinds in CUTLASS Library
*/
#pragma once
#include <iostream>
#include <sstream>
#include <cstring>
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/util.h"
#include "library_internal.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
Provider Provider_,
typename ElementA_,
typename LayoutA_,
cutlass::ComplexTransform TransformA,
typename ElementB_,
typename LayoutB_,
cutlass::ComplexTransform TransformB,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
class GemmReferenceOperation : public Operation {
public:
static Provider const kProvider = Provider_;
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA, LayoutA>;
static cutlass::ComplexTransform const kTransformA = TransformA;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB, LayoutB>;
static cutlass::ComplexTransform const kTransformB = TransformB;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementD = ElementD_;
using TensorRefC = TensorRef<ElementC, LayoutC>;
using TensorRefD = TensorRef<ElementD, LayoutC>;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ConvertOp = ConvertOp_;
using InnerProductOp = InnerProductOp_;
protected:
/// Storage for the name string
std::string name_;
///
GemmDescription description_;
public:
/// Constructor
GemmReferenceOperation() {
// Basic information
description_.provider = kProvider;
description_.kind = OperationKind::kGemm;
description_.gemm_kind = GemmKind::kUniversal;
// Tensor description
description_.A = make_TensorDescription<ElementA, LayoutA>();
description_.transform_A = ComplexTransformMap<kTransformA>::kId;
description_.B = make_TensorDescription<ElementB, LayoutB>();
description_.transform_B = ComplexTransformMap<kTransformB>::kId;
description_.C = make_TensorDescription<ElementC, LayoutC>();
description_.D = make_TensorDescription<ElementD, LayoutC>();
// Epilogue compute and accumulator type description
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
// Compute capability for gemm reference
description_.tile_description.minimum_compute_capability =
(kProvider == Provider::kReferenceDevice ? 50 : 0);
description_.tile_description.maximum_compute_capability = 1024;
// Procedural name
std::stringstream ss;
ss << "gemm"
<< "_reference_" << to_string(description_.provider)
<< "_" << to_string(description_.A.element) << to_string(description_.A.layout)
<< "_" << to_string(description_.B.element) << to_string(description_.B.layout)
<< "_" << to_string(description_.C.element) << to_string(description_.C.layout)
<< "_" << to_string(description_.tile_description.math_instruction.element_accumulator);
name_ = ss.str();
description_.name = name_.c_str();
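    // The resulting names follow the pattern
    //   gemm_reference_<provider>_<elemA><layoutA>_<elemB><layoutB>_<elemC><layoutC>_<accumulator>
    // with each token produced by the library's to_string() helpers.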
}
/// Returns the description of the GEMM operation
virtual OperationDescription const & description() const {
return description_;
}
virtual Status can_implement(
void const *configuration,
void const *arguments) const {
return Status::kSuccess;
}
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(GemmUniversalConfiguration);
}
virtual uint64_t get_device_workspace_size(
void const *configuration,
void const *arguments = nullptr) const {
return 0;
}
virtual Status initialize(
void const *configuration,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration));
return Status::kSuccess;
}
virtual Status run(
void const *arguments,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
GemmUniversalConfiguration const &config = *static_cast<GemmUniversalConfiguration const *>(host_workspace);
GemmUniversalArguments const &args = *static_cast<GemmUniversalArguments const *>(arguments);
TensorRefA ref_A{static_cast<ElementA *>(const_cast<void *>(args.A)), LayoutA(int(config.lda))};
TensorRefB ref_B{static_cast<ElementB *>(const_cast<void *>(args.B)), LayoutB(int(config.ldb))};
TensorRefC ref_C{static_cast<ElementC *>(const_cast<void *>(args.C)), LayoutC(int(config.ldc))};
TensorRefD ref_D{static_cast<ElementD *>(args.D), LayoutC(int(config.ldd))};
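    // The leading dimensions captured in the configuration are wrapped into layout objects
    // here; the same tensor references are then passed to either the host or the device
    // reference GEMM, selected at compile time by kProvider.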
if (kProvider == Provider::kReferenceHost) {
cutlass::reference::host::GemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp,
InnerProductOp
>(
config.problem_size,
*static_cast<ElementCompute const *>(args.alpha),
ref_A,
kTransformA,
ref_B,
kTransformB,
*static_cast<ElementCompute const *>(args.beta),
ref_C,
ref_D,
ElementAccumulator(),
((config.mode == library::GemmUniversalMode::kBatched) ? config.batch_count : 1),
args.batch_stride_A,
args.batch_stride_B,
args.batch_stride_C,
args.batch_stride_D
);
return Status::kSuccess;
}
else if (kProvider == Provider::kReferenceDevice) {
cutlass::reference::device::GemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp,
InnerProductOp
>(
config.problem_size,
*static_cast<ElementCompute const *>(args.alpha),
ref_A,
kTransformA,
ref_B,
kTransformB,
*static_cast<ElementCompute const *>(args.beta),
ref_C,
ref_D,
ElementAccumulator(),
((config.mode == library::GemmUniversalMode::kBatched) ? config.batch_count : 1),
args.batch_stride_A,
args.batch_stride_B,
args.batch_stride_C,
args.batch_stride_D
);
return Status::kSuccess;
}
return Status::kErrorNotSupported;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
cutlass::ComplexTransform TransformA,
typename ElementB_,
typename LayoutB_,
cutlass::ComplexTransform TransformB,
typename ElementC_,
typename LayoutC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm(Manifest &manifest) {
manifest.append(new GemmReferenceOperation<
Provider::kReferenceHost,
ElementA_, LayoutA_, TransformA,
ElementB_, LayoutB_, TransformB,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>);
manifest.append(new GemmReferenceOperation<
Provider::kReferenceDevice,
ElementA_, LayoutA_, TransformA,
ElementB_, LayoutB_, TransformB,
ElementC_, LayoutC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>);
}
/// Helper to create NN, NT, TN, and TT GEMM layouts.
template <
typename ElementA_, cutlass::ComplexTransform TransformA,
typename ElementB_, cutlass::ComplexTransform TransformB,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_canonical_layouts(Manifest &manifest) {
// M Major outputs
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
// N Major outputs
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::ColumnMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::ColumnMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm<
ElementA_, cutlass::layout::RowMajor, TransformA,
ElementB_, cutlass::layout::RowMajor, TransformB,
ElementC_, cutlass::layout::RowMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
/// Helper to create GEMMs with TN and interleaved layouts.
template <
int InterleaveK,
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_interleaved_layouts(Manifest &manifest) {
make_gemm<
ElementA_, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone,
ElementC_, cutlass::layout::ColumnMajor,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
/// Helper to create real-valued GEMMs with canonical layouts
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_real_canonical_layouts(Manifest &manifest) {
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::ComplexTransform::kNone,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
// Helper to create all complex transformation permutations
template <
typename ElementA_,
typename ElementB_,
typename ElementC_,
typename ElementCompute_,
typename ElementAccumulator_ = ElementCompute_,
typename ElementD_ = ElementC_,
typename ConvertOp_ = NumericConverter<ElementD_, ElementCompute_>,
typename InnerProductOp_ = multiply_add<ElementAccumulator_>
>
void make_gemm_complex_canonical_layouts(Manifest &manifest) {
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::ComplexTransform::kNone,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kConjugate,
ElementB_, cutlass::ComplexTransform::kConjugate,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kNone,
ElementB_, cutlass::ComplexTransform::kConjugate,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
make_gemm_canonical_layouts<
ElementA_, cutlass::ComplexTransform::kConjugate,
ElementB_, cutlass::ComplexTransform::kNone,
ElementC_,
ElementCompute_,
ElementAccumulator_,
ElementD_,
ConvertOp_,
InnerProductOp_
>(manifest);
}
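// Usage sketch (hypothetical initializer, not part of this header): the helpers above are meant
// to be called from a manifest initializer so that host- and device-side reference kernels are
// registered for every canonical layout combination of a given set of element types.
#if 0
void initialize_example_gemm_reference_operations(Manifest &manifest) {
  // Real-valued single-precision GEMM references (all NN/NT/TN/TT layouts, both providers).
  make_gemm_real_canonical_layouts<float, float, float, float>(manifest);
  // Complex-valued single-precision GEMM references covering every conjugation permutation.
  make_gemm_complex_canonical_layouts<
    cutlass::complex<float>,
    cutlass::complex<float>,
    cutlass::complex<float>,
    cutlass::complex<float>
  >(manifest);
}
#endif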
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/gemm_reference_operation.h/0 | {
"file_path": "tools/library/src/reference/gemm_reference_operation.h",
"repo_id": "tools",
"token_count": 5792
} | 74 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Gemm Profiler
*/
#pragma once
#include <vector>
#include <string>
#include <memory>
#include <algorithm>
#include <unordered_map>
// CUTLASS Library includes
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "cutlass/library/manifest.h"
// Profiler includes
#include "options.h"
#include "device_context.h"
#include "operation_profiler.h"
#include "performance_result.h"
#include "problem_space.h"
#include "reduction_operation_profiler.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Operation profiler for GEMM problems
class GemmOperationProfiler : public OperationProfiler {
public:
/// Problem structure obtained from problem space
struct GemmProblem {
cutlass::library::GemmUniversalMode mode{library::GemmUniversalMode::kGemm};
int64_t m{16};
int64_t n{16};
int64_t k{16};
int64_t lda{0};
int64_t ldb{0};
int64_t ldc{0};
std::vector<uint8_t> alpha;
std::vector<uint8_t> beta;
cutlass::library::SplitKMode split_k_mode{library::SplitKMode::kNone};
int split_k_slices{1};
int batch_count{1};
cutlass::library::RasterOrder raster_order{cutlass::library::RasterOrder::kHeuristic};
// gemm with parallel interleaved reduction
// gemm epilogue (alpha, beta) = (1.0, 0.0)
// reduction epilogue (alpha, beta) = (GemmProblem::alpha, GemmProblem::beta)
std::vector<uint8_t> alpha_one;
std::vector<uint8_t> beta_zero;
//
// Methods
//
/// Parses the problem
Status parse(
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Total number of bytes loaded
int64_t bytes(library::GemmDescription const &operation_desc) const;
/// Total number of flops computed
int64_t flops(library::GemmDescription const &operation_desc) const;
/// Initializes a performance result
void initialize_result(
PerformanceResult &result,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space);
};
/// Workspace used
struct GemmWorkspace {
DeviceAllocation *A{nullptr};
DeviceAllocation *B{nullptr};
DeviceAllocation *C{nullptr};
DeviceAllocation *Computed{nullptr};
DeviceAllocation *Reference{nullptr};
/// Number of copies of the problem workspace which are visited sequentially during
/// profiling to avoid camping in the last level cache.
int problem_count{1};
library::GemmUniversalConfiguration configuration;
library::GemmUniversalArguments arguments;
/// Buffer used for the operation's host workspace
std::vector<uint8_t> host_workspace;
/// Buffer used for the operations' device workspace
DeviceAllocation device_workspace;
/// Library configuration and arguments for reduction operator
library::ReductionConfiguration reduction_configuration;
library::ReductionArguments reduction_arguments;
/// Buffer used for the cutlass reduction operations' host workspace
std::vector<uint8_t> reduction_host_workspace;
};
protected:
//
// Data members
//
/// GEMM problem obtained from problem space
GemmProblem problem_;
/// Device memory allocations
GemmWorkspace gemm_workspace_;
  /// CUTLASS parallel reduction operation to follow this GEMM operation
library::Operation const *reduction_op_;
public:
//
// Methods
//
/// Ctor
GemmOperationProfiler(Options const &options);
/// Destructor
virtual ~GemmOperationProfiler();
GemmProblem const& problem() const { return problem_; }
/// Prints usage statement for the math function
virtual void print_usage(std::ostream &out) const;
/// Prints examples
virtual void print_examples(std::ostream &out) const;
/// Extracts the problem dimensions
virtual Status initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Initializes workspace
virtual Status initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Verifies CUTLASS against references
virtual bool verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Measures performance results
virtual bool profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
protected:
/// Initializes the performance result
void initialize_result_(
PerformanceResult &result,
Options const &options,
library::GemmDescription const &operation_desc,
ProblemSpace const &problem_space);
/// Verifies CUTLASS against references
bool verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Verifies CUTLASS against host and device references
bool verify_with_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem,
cutlass::library::NumericTypeID element_A,
cutlass::library::NumericTypeID element_B);
/// Method to profile a CUTLASS Operation
Status profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace);
/// Initialize reduction problem dimensions and library::Operation
bool initialize_reduction_configuration_(
library::Operation const *operation,
ProblemSpace::Problem const &problem);
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h",
"repo_id": "tools",
"token_count": 2460
} | 75 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuDNN.
*/
#if CUTLASS_ENABLE_CUDNN
#include <stdexcept>
#include "cutlass/profiler/cudnn_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuDNN status to cutlass::Status
Status get_cutlass_status(cudnnStatus_t cudnn_status) {
if (cudnn_status == CUDNN_STATUS_SUCCESS) {
return Status::kSuccess;
}
else if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) {
return Status::kErrorInvalidProblem;
}
if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) {
return Status::kErrorNotSupported;
}
return Status::kErrorInternal;
}
/// Converts a cuDNN status to cutlass::profiler::Disposition
Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status) {
if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) {
return Disposition::kInvalidProblem;
}
else if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) {
return Disposition::kNotSupported;
}
return Disposition::kFailed;
}
/// Converts a cudnnStatus_t to cutlass::Status and returns it if it is Status::kSuccess; otherwise throws an exception
Status checkCudnnErr(cudnnStatus_t cudnn_status) {
Status cutlass_status = get_cutlass_status(cudnn_status);
if(cutlass_status != Status::kSuccess) {
throw std::runtime_error("checkCudnnErr failed");
}
return cutlass_status;
}
/// Maps a CUTLASS conv mode to a cuDNN cudnnConvolutionMode_t
bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode) {
switch (conv_mode) {
case conv::Mode::kCrossCorrelation:
cudnn_conv_mode = CUDNN_CROSS_CORRELATION;
return true;
case conv::Mode::kConvolution:
cudnn_conv_mode = CUDNN_CONVOLUTION;
return true;
default: break;
}
return false;
}
/// Maps a CUTLASS tensor layout to a cuDNN cudnnTensorFormat_t
bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout) {
switch (layout) {
// cudnn uses the same enum for TensorNC*HW along nDim (ConvDescription::conv_dim)
case library::LayoutTypeID::kTensorNCHW:
case library::LayoutTypeID::kTensorNCDHW:
cudnn_layout = CUDNN_TENSOR_NCHW;
return true;
case library::LayoutTypeID::kTensorNHWC:
case library::LayoutTypeID::kTensorNDHWC:
cudnn_layout = CUDNN_TENSOR_NHWC;
return true;
default: break;
}
return false;
}
/// Maps a CUTLASS numeric type to a cuDNN cudnnDataType_t
bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type) {
switch (element_type) {
case library::NumericTypeID::kF16:
cudnn_element_type = CUDNN_DATA_HALF;
return true;
case library::NumericTypeID::kF32:
cudnn_element_type = CUDNN_DATA_FLOAT;
return true;
case library::NumericTypeID::kF64:
cudnn_element_type = CUDNN_DATA_DOUBLE;
return true;
case library::NumericTypeID::kS2:
break;
case library::NumericTypeID::kS4:
break;
case library::NumericTypeID::kS8:
cudnn_element_type = CUDNN_DATA_INT8;
return true;
case library::NumericTypeID::kS16:
break;
case library::NumericTypeID::kS32:
cudnn_element_type = CUDNN_DATA_INT32;
return true;
case library::NumericTypeID::kS64:
break;
case library::NumericTypeID::kU2:
break;
case library::NumericTypeID::kU4:
break;
case library::NumericTypeID::kU8:
cudnn_element_type = CUDNN_DATA_UINT8;
return true;
case library::NumericTypeID::kU16:
break;
case library::NumericTypeID::kU32:
break;
case library::NumericTypeID::kU64:
break;
case library::NumericTypeID::kB1:
break;
case library::NumericTypeID::kInvalid:
default:
break;
}
return false;
}
/// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type
bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc) {
switch (conv_desc.tile_description.math_instruction.opcode_class) {
case library::OpcodeClassID::kTensorOp:
{
cudnn_math_type = CUDNN_TENSOR_OP_MATH;
library::MathOperationID math_op = conv_desc.tile_description.math_instruction.math_operation;
// Allow conversion on input data type for fast math operations
if (math_op == library::MathOperationID::kMultiplyAddFastF16 ||
math_op == library::MathOperationID::kMultiplyAddFastBF16)
{
cudnn_math_type = CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION;
}
return true;
}
case library::OpcodeClassID::kSimt:
#if (defined(CUDNN_VERSION) && CUDNN_VERSION <= 8000)
cudnn_math_type = CUDNN_DEFAULT_MATH;
#else
cudnn_math_type = CUDNN_FMA_MATH;
#endif
return true;
}
return false;
}
/// cuDNN compute type seems to be hardcoded to float (to work around a possible cuDNN issue)
float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src) {
switch (type) {
case library::NumericTypeID::kF16:
{
return float(*(static_cast<half_t const*>(src)));
}
case library::NumericTypeID::kF32:
{
return float(*(static_cast<float const*>(src)));
}
case library::NumericTypeID::kS32:
{
return float(*(static_cast<int const*>(src)));
}
default:
      throw std::runtime_error("Data type not handled in cast_cudnn_compute_type_to_float");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuDNN can satisfy a particular Conv2d description
Status cudnn_satisfies(
library::ConvDescription const &desc,
library::Conv2dConfiguration const &configuration) {
auto const &a_tensor = desc.A;
auto const &b_tensor = desc.B;
auto const &c_tensor = desc.C;
auto const &math_instruction = desc.tile_description.math_instruction;
if(a_tensor.element != b_tensor.element) {
return Status::kErrorInvalidDataType;
}
//////////////////////// Convolution output dimensions p and q ///////////////////////
// Cutlass convolutions support arbitrary output dimensions and not constrained by //
// input, filter, padding, striding, dilation sizes. //
// cuDNN sets the output dimensions (p, q) using following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where; div_up(a, b) : (a - 1)/b + 1 //
// //
// Before launching cudnn verification or profiling check that output p and q //
// dimensions are cuDNN compliant. //
// //
// If user sets output p and q which do not follow above constraints, cutlass conv, //
// host reference, device reference can run. However, cudnn convolution returns //
// "Invalid problem" //
// //
///////////////////////////////////////////////////////////////////////////////////////
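  // Worked example (illustrative values, not from the library): with H = 56, pad_h = 1, R = 3,
  // dilation_h = 1, and stride_h = 1, cuDNN expects P = (56 + 2*1 - ((3 - 1)*1 + 1)) / 1 + 1 = 56;
  // a configuration whose P differs from this value is rejected below as an invalid problem.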
// check conv output dimension p for cudnn
int cudnn_output_p =
(
(
configuration.problem_size.H +
2 * configuration.problem_size.pad_h -
((configuration.problem_size.R - 1) *
configuration.problem_size.dilation_h + 1)
) /
(configuration.problem_size.stride_h)
+ 1
);
if (cudnn_output_p != configuration.problem_size.P) {
return Status::kErrorInvalidProblem;
}
// check conv output dimension q for cudnn
int cudnn_output_q =
(
(
configuration.problem_size.W +
2 * configuration.problem_size.pad_w -
((configuration.problem_size.S - 1) *
configuration.problem_size.dilation_w + 1)
) /
(configuration.problem_size.stride_w)
+ 1
);
if (cudnn_output_q != configuration.problem_size.Q) {
return Status::kErrorInvalidProblem;
}
//////////////////////////////////////////////////////////////////////////////////////
  // conv operator with input=FP16, accumulator=FP32, output=FP32 (hss) is not supported in cuDNN
if (a_tensor.element == library::NumericTypeID::kF16 &&
b_tensor.element == library::NumericTypeID::kF16 &&
math_instruction.element_accumulator == library::NumericTypeID::kF32 &&
c_tensor.element == library::NumericTypeID::kF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kBF16 ||
b_tensor.element == library::NumericTypeID::kBF16 ||
c_tensor.element == library::NumericTypeID::kBF16
) {
return Status::kErrorNotSupported;
}
// TF32 input not supported in cuDNN
if (a_tensor.element == library::NumericTypeID::kTF32 ||
b_tensor.element == library::NumericTypeID::kTF32 ||
c_tensor.element == library::NumericTypeID::kTF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kS8 ||
b_tensor.element == library::NumericTypeID::kS8 ||
c_tensor.element == library::NumericTypeID::kS8
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kU8 ||
b_tensor.element == library::NumericTypeID::kU8 ||
c_tensor.element == library::NumericTypeID::kU8
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kS4 ||
b_tensor.element == library::NumericTypeID::kS4 ||
c_tensor.element == library::NumericTypeID::kS4
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kU4 ||
b_tensor.element == library::NumericTypeID::kU4 ||
c_tensor.element == library::NumericTypeID::kU4
) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuDNN can satisfy a particular Conv3d description
Status cudnn_satisfies(
library::ConvDescription const &desc,
library::Conv3dConfiguration const &configuration) {
auto const &a_tensor = desc.A;
auto const &b_tensor = desc.B;
auto const &c_tensor = desc.C;
auto const &math_instruction = desc.tile_description.math_instruction;
if(a_tensor.element != b_tensor.element) {
return Status::kErrorInvalidDataType;
}
//////////////////////// Convolution output dimensions p and q ///////////////////////
// Cutlass convolutions support arbitrary output dimensions and not constrained by //
// input, filter, padding, striding, dilation sizes. //
// cuDNN sets the output dimensions (p, q) using following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where; div_up(a, b) : (a - 1)/b + 1 //
// //
// Before launching cudnn verification or profiling check that output p and q //
// dimensions are cuDNN compliant. //
// //
// If user sets output p and q which do not follow above constraints, cutlass conv, //
// host reference, device reference can run. However, cudnn convolution returns //
// "Invalid problem" //
// //
///////////////////////////////////////////////////////////////////////////////////////
// check conv output dimension z for cudnn
int cudnn_output_z =
(
(
configuration.problem_size.D +
2 * configuration.problem_size.pad_d -
((configuration.problem_size.T - 1) *
configuration.problem_size.dilation_d + 1)
) /
(configuration.problem_size.stride_d)
+ 1
);
if (cudnn_output_z != configuration.problem_size.Z) {
return Status::kErrorInvalidProblem;
}
// check conv output dimension p for cudnn
int cudnn_output_p =
(
(
configuration.problem_size.H +
2 * configuration.problem_size.pad_h -
((configuration.problem_size.R - 1) *
configuration.problem_size.dilation_h + 1)
) /
(configuration.problem_size.stride_h)
+ 1
);
if (cudnn_output_p != configuration.problem_size.P) {
return Status::kErrorInvalidProblem;
}
// check conv output dimension q for cudnn
int cudnn_output_q =
(
(
configuration.problem_size.W +
2 * configuration.problem_size.pad_w -
((configuration.problem_size.S - 1) *
configuration.problem_size.dilation_w + 1)
) /
(configuration.problem_size.stride_w)
+ 1
);
if (cudnn_output_q != configuration.problem_size.Q) {
return Status::kErrorInvalidProblem;
}
//////////////////////////////////////////////////////////////////////////////////////
  // conv operator with input, accumulator, output datatypes of (hss) is not supported
  // in cuDNN
if (a_tensor.element == library::NumericTypeID::kF16 &&
b_tensor.element == library::NumericTypeID::kF16 &&
math_instruction.element_accumulator == library::NumericTypeID::kF32 &&
c_tensor.element == library::NumericTypeID::kF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kBF16 ||
b_tensor.element == library::NumericTypeID::kBF16 ||
c_tensor.element == library::NumericTypeID::kBF16
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kTF32 ||
b_tensor.element == library::NumericTypeID::kTF32 ||
c_tensor.element == library::NumericTypeID::kTF32
) {
return Status::kErrorNotSupported;
}
if (a_tensor.element == library::NumericTypeID::kS8 ||
b_tensor.element == library::NumericTypeID::kS8 ||
c_tensor.element == library::NumericTypeID::kS8
) {
return Status::kErrorNotSupported;
}
// S4 not supported in cuDNN
if (a_tensor.element == library::NumericTypeID::kS4 ||
b_tensor.element == library::NumericTypeID::kS4 ||
c_tensor.element == library::NumericTypeID::kS4
) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif
| tools/profiler/src/cudnn_helpers.cpp/0 | {
"file_path": "tools/profiler/src/cudnn_helpers.cpp",
"repo_id": "tools",
"token_count": 6868
} | 76 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/symm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
SymmOperationProfiler::SymmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kSymm,
{
{ArgumentTypeID::kEnumerated, {"symm_kind"}, "Variant of Symm (universal)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the Symm problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the Symm problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for Symm kernel (left or right)"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for Symm kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for Symm kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of Symm computed in one batch"},
},
{ library::Provider::kCUBLAS }
) {
  description_ = " Symmetric Matrix-Matrix Multiplication. D = alpha * A * B + beta * C or D = alpha * B * A + beta * C (where A is symmetric/hermitian)";
}
/// Destructor
SymmOperationProfiler::~SymmOperationProfiler() {
}
/// Prints usage statement for the math function
void SymmOperationProfiler::print_usage(std::ostream &out) const {
out << "Symm" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void SymmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size SYMM kernel:\n"
<< " $ cutlass_profiler --operation=Symm --blas_mode=symmetric --m=1024 --n=128\n\n"
<< "Profile a particular problem size HEMM kernel:\n"
<< " $ cutlass_profiler --operation=Symm --blas_mode=hermitian --m=1024 --n=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Symm --m=1024:4096:256 --n=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Symm --accumulator-type=f16,f32\n\n"
<< "Schmoo over side modees:\n"
<< " $ cutlass_profiler --operation=Symm --side_mode=left/right\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=Symm --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=Symm --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Symm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Symm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Symm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=Symm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to symm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Symm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status SymmOperationProfiler::SymmProblem::parse(
library::SymmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
if (operation_desc.side_mode == SideMode::kLeft) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->m)}).front();
}
else if (operation_desc.side_mode == SideMode::kRight) {
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->n)}).front();
}
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->m), int(this->n)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t SymmOperationProfiler::SymmProblem::bytes(library::SymmDescription const &operation_desc) const {
int64_t bytes = 0;
// Input bytes read and Output bytes written for the gemm problem
// Half matrix including the diagonal will have (X*(X+1))/2 elements
if (operation_desc.side_mode == SideMode::kLeft) {
bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * (m + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n +
int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
} else if (operation_desc.side_mode == SideMode::kRight) {
bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * (n + 1) / 2 +
int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n +
int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
}
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
}
bytes *= batch_count;
return bytes;
}
/// Total number of flops computed
int64_t SymmOperationProfiler::SymmProblem::flops(library::SymmDescription const &operation_desc) const {
// FLOPs for first TRMM kernel (with diagonal) = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero
// FLOPs for second TRMM kernel (with diagonal) = 2 * [ ( M * (M-1)/2 * N ) ] // Beta is zero
// FLOPs = m*(m+1)*n [mma1] + m*(m-1)*n [mma2] + 2*m*n [epilogue]
// FLOPs = 2*m*n(m+1) for left side mode
// FLOPs can also be calculated to be same as GEMM with correct value for 'k' as below.
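  // Worked example (illustrative values only): for side mode kLeft with m = 1024 and n = 128,
  // k = m = 1024, so flops_ = 2 * (1024 * 128 * 1024 + 1024 * 128) = 268,697,600 before the
  // complex-valued scaling applied below.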
int64_t k = (operation_desc.side_mode == SideMode::kLeft) ? int64_t(m) : int64_t(n);
int64_t flops_ = (int64_t(m) * n * k + m * n) * 2;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
/// Initializes a performance result
void SymmOperationProfiler::SymmProblem::initialize_result(
PerformanceResult &result,
library::SymmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "symm_kind", problem_space, library::to_string(operation_desc.symm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status SymmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SymmDescription const &operation_desc =
static_cast<library::SymmDescription const &>(operation->description());
if (operation_desc.symm_kind != library::SymmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
symm_workspace_.configuration.problem_size.m() = int(problem_.m);
symm_workspace_.configuration.problem_size.n() = int(problem_.n);
symm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft)
? int(problem_.m) : int(problem_.n);
symm_workspace_.configuration.lda = problem_.lda;
symm_workspace_.configuration.ldb = problem_.ldb;
symm_workspace_.configuration.ldc = problem_.ldc;
symm_workspace_.configuration.ldd = problem_.ldc;
//symm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
symm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
symm_workspace_.arguments.A = nullptr;
symm_workspace_.arguments.B = nullptr;
symm_workspace_.arguments.C = nullptr;
symm_workspace_.arguments.D = nullptr;
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&symm_workspace_.configuration, &symm_workspace_.arguments);
}
/// Initializes the performance result
void SymmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::SymmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initializes workspace
Status SymmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SymmDescription const &operation_desc =
static_cast<library::SymmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
if (operation_desc.side_mode == SideMode::kLeft) {
symm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.m)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
} else if (operation_desc.side_mode == SideMode::kRight) {
symm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
}
symm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
symm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
symm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
symm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
symm_workspace_.Computed->copy_from_device(symm_workspace_.C->data());
symm_workspace_.Reference->copy_from_device(symm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&symm_workspace_.configuration);
symm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&symm_workspace_.configuration);
symm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&symm_workspace_.configuration,
symm_workspace_.host_workspace.data(),
symm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kSymm;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool SymmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing Symm arguments
symm_workspace_.arguments.A = symm_workspace_.A->data();
symm_workspace_.arguments.B = symm_workspace_.B->data();
symm_workspace_.arguments.C = symm_workspace_.C->data();
symm_workspace_.arguments.D = symm_workspace_.Computed->data();
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&symm_workspace_.arguments,
symm_workspace_.host_workspace.data(),
symm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
  // CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & symm_desc = static_cast<library::SymmDescription const &>(operation->description());
if (cublas_satisfies(symm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool SymmOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::SymmDescription const &symm_desc =
static_cast<library::SymmDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Symm()
//
// Initialize structure containing Symm arguments
symm_workspace_.arguments.A = symm_workspace_.A->data();
symm_workspace_.arguments.B = symm_workspace_.B->data();
symm_workspace_.arguments.C = symm_workspace_.Reference->data();
symm_workspace_.arguments.D = symm_workspace_.Reference->data();
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasSymmDispatcher symm_op(
symm_desc,
symm_workspace_.configuration,
symm_workspace_.arguments
);
if (symm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = symm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*symm_workspace_.Computed,
*symm_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
symm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool SymmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing Symm arguments
symm_workspace_.arguments.A = symm_workspace_.A->data();
symm_workspace_.arguments.B = symm_workspace_.B->data();
symm_workspace_.arguments.C = symm_workspace_.C->data();
symm_workspace_.arguments.D = symm_workspace_.Computed->data();
symm_workspace_.arguments.alpha = problem_.alpha.data();
symm_workspace_.arguments.beta = problem_.beta.data();
symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&symm_workspace_.arguments,
symm_workspace_.host_workspace.data(),
symm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/symm_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/symm_operation_profiler.cu",
"repo_id": "tools",
"token_count": 9432
} | 77 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief utils code for device cutlass code
*/
#pragma once
#include <cuda_fp16.h>
#include <float.h>
#define FINAL_MASK 0xffffffff
struct half4 {
half x, y, z, w;
};
// Butterfly (XOR-shuffle) reduction: sums NUM values per thread across the warp in place;
// every lane ends up holding the warp-wide sums in val[]. The scalar return value is unused.
template<typename T, int NUM>
__inline__ __device__ T warpReduceSum(T* val)
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1)
val[i] += __shfl_xor_sync(FINAL_MASK, val[i], mask, 32);
}
return (T)(0.0f);
}
// Block-wide sum: reduces within each warp, stages per-warp partials in shared memory, then
// reduces again in the first warp. Afterwards the lanes of warp 0 (including thread 0) hold the
// block-wide sums in val[]; the scalar return value is unused.
template<typename T, int NUM>
__inline__ __device__ T blockReduceSum(T* val)
{
__shared__ T shared[NUM][33];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
warpReduceSum<T, NUM>(val);
if (lane == 0) {
#pragma unroll
for (int i = 0; i < NUM; i++) {
shared[i][wid] = val[i];
}
}
__syncthreads();
bool is_mask = threadIdx.x < (blockDim.x / 32.f);
#pragma unroll
for (int i = 0; i < NUM; i++) {
val[i] = is_mask ? shared[i][lane] : (T)(0.0f);
}
warpReduceSum<T, NUM>(val);
return (T)0.0f;
}
// Butterfly (XOR-shuffle) reduction computing the warp-wide max of NUM values per thread in place.
template<typename T, int NUM>
__inline__ __device__ T warpReduceMax(T* val)
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1)
val[i] = max(val[i], __shfl_xor_sync(FINAL_MASK, val[i], mask, 32));
}
return (T)(0.0f);
}
template<typename T, int NUM>
__inline__ __device__ T blockReduceMax(T* val)
{
static __shared__ T shared[32][NUM];
    int lane = threadIdx.x & 0x1f;  // lane index within the warp
    int wid = threadIdx.x >> 5;     // warp index within the block
    warpReduceMax<T, NUM>(val);     // compute the max within each warp
    if (lane == 0)                  // lane 0 records its warp's max, indexed by warp id
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
shared[wid][i] = val[i];
}
}
__syncthreads();
    // Use blockDim.x / 32.f rather than blockDim.x >> 5 so the guard below is correct
    // even when blockDim.x is not a multiple of 32
bool is_mask = threadIdx.x < (blockDim.x / 32.f);
#pragma unroll
for (int i = 0; i < NUM; i++) {
val[i] = is_mask ? shared[lane][i] : (T)(-FLT_MAX);
}
warpReduceMax<T, NUM>(val);
return (T)0.0f;
}
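// Usage sketch (illustrative only, not part of this header): computing a per-block maximum of a
// float array with blockReduceMax. Each thread accumulates a strided partial max, and after the
// block reduction the lanes of warp 0 (including thread 0) hold the block-wide result in val[0].
#if 0
__global__ void block_max_example(float const* in, float* out, int n)
{
    float val[1] = {-FLT_MAX};
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
        val[0] = max(val[0], in[i]);
    }
    blockReduceMax<float, 1>(val);  // reduced value is left in val[0], not in the return value
    if (threadIdx.x == 0) {
        out[blockIdx.x] = val[0];
    }
}
#endif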
| tools/util/include/cutlass/util/device_utils.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_utils.h",
"repo_id": "tools",
"token_count": 1513
} | 78 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ScalarType>,
typename InnerProductOp = multiply_add<ComputeType>,
int kMblock = 4,
int kNblock = 4
>
__global__ void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementD, LayoutC> tensor_d,
ComputeType initial_accum,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
ConvertOp convert_op;
InnerProductOp inner_product_op;
int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock;
int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock;
int batch_idx = blockIdx.z;
tensor_a.add_pointer_offset(batch_idx * batch_stride_A);
tensor_b.add_pointer_offset(batch_idx * batch_stride_B);
tensor_c.add_pointer_offset(batch_idx * batch_stride_C);
tensor_d.add_pointer_offset(batch_idx * batch_stride_D);
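  // Grid-stride loop over the batch dimension: blockIdx.z strides through the
  // batches so batch_count may exceed the launch's grid z-dimension.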
for (; batch_idx < batch_count; batch_idx += gridDim.z) {
// Compute matrix product using blocks
ComputeType accum[kMblock][kNblock];
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b = tensor_b.at(MatrixCoord(k_block, col));
ComputeType a_ik = ComputeType(a);
ComputeType b_kj = ComputeType(b);
if (transform_a == ComplexTransform::kConjugate) {
a_ik = conj(a_ik);
}
if (transform_b == ComplexTransform::kConjugate) {
b_kj = conj(b_kj);
}
accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]);
}
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
tensor_a.add_pointer_offset(batch_stride_A * gridDim.z);
tensor_b.add_pointer_offset(batch_stride_B * gridDim.z);
tensor_c.add_pointer_offset(batch_stride_C * gridDim.z);
tensor_d.add_pointer_offset(batch_stride_D * gridDim.z);
} // for (batch_idx)
}
} // namespace kernel
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ScalarType>,
typename InnerProductOp = multiply_add<ComputeType>
>
void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementD, LayoutC> tensor_d,
ComputeType initial_accum,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
int const kMblock = 4;
int const kNblock = 4;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock),
(problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock),
batch_count % std::numeric_limits<uint16_t>::max()
);
if (grid.y <= std::numeric_limits<uint16_t>::max()) {
kernel::GemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ScalarType,
ComputeType,
ElementD,
ConvertOp,
InnerProductOp,
kMblock,
kNblock
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
transform_a,
tensor_b,
transform_b,
beta,
tensor_c,
tensor_d,
initial_accum,
batch_count,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
);
} else {
// Using bigger thread tile size
int const kBigMblock = 4;
int const kBigNblock = 16;
dim3 Bigblock(16, 8);
dim3 Biggrid(
      (problem_size.m() + Bigblock.x * kBigMblock - 1) / (Bigblock.x * kBigMblock),
      (problem_size.n() + Bigblock.y * kBigNblock - 1) / (Bigblock.y * kBigNblock),
batch_count % std::numeric_limits<uint16_t>::max()
);
kernel::GemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ScalarType,
ComputeType,
ElementD,
ConvertOp,
InnerProductOp,
kBigMblock,
kBigNblock
><<< Biggrid, Bigblock >>>(
problem_size,
alpha,
tensor_a,
transform_a,
tensor_b,
transform_b,
beta,
tensor_c,
tensor_d,
initial_accum,
batch_count,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ElementD = ElementC
>
void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementD, LayoutC> tensor_d) {
GemmComplex(problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0));
}
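// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition, not part of the original
// header): running the reference kernel on device-resident, column-major
// complex<float> tensors. The function name and raw-pointer interface are
// hypothetical, and cutlass::layout::ColumnMajor is assumed to be visible
// through the includes above.
// ---------------------------------------------------------------------------
inline void reference_cgemm_sketch(
  int m, int n, int k,
  cutlass::complex<float> alpha,
  cutlass::complex<float> *ptr_A,    // m-by-k, leading dimension m
  cutlass::complex<float> *ptr_B,    // k-by-n, leading dimension k
  cutlass::complex<float> beta,
  cutlass::complex<float> *ptr_C,    // m-by-n, leading dimension m
  cutlass::complex<float> *ptr_D) {  // m-by-n, leading dimension m

  using Layout  = cutlass::layout::ColumnMajor;
  using Complex = cutlass::complex<float>;

  cutlass::TensorRef<Complex, Layout> ref_A(ptr_A, Layout(m));
  cutlass::TensorRef<Complex, Layout> ref_B(ptr_B, Layout(k));
  cutlass::TensorRef<Complex, Layout> ref_C(ptr_C, Layout(m));
  cutlass::TensorRef<Complex, Layout> ref_D(ptr_D, Layout(m));

  // Neither operand is conjugated; accumulation is in complex<float> starting
  // from zero. This resolves to the entry point above with batch_count = 1.
  GemmComplex(
    {m, n, k},
    alpha,
    ref_A, cutlass::ComplexTransform::kNone,
    ref_B, cutlass::ComplexTransform::kNone,
    beta,
    ref_C, ref_D,
    Complex(0));
}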
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/gemm_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/gemm_complex.h",
"repo_id": "tools",
"token_count": 4070
} | 79 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
namespace cutlass {
namespace reference {
namespace host {
template<typename Out, typename In>
struct CastIfScalar {
static Out cast(In in) {
return Out(in);
}
};
template<typename OutScalar, typename In>
struct CastIfScalar<cutlass::complex<OutScalar>, In> {
typedef cutlass::complex<OutScalar> Out;
static Out cast(In in) {
return Out(static_cast<OutScalar>(in));
}
};
template<typename OutScalar, typename InScalar>
struct CastIfScalar<cutlass::complex<OutScalar>, cutlass::complex<InScalar>> {
typedef cutlass::complex<OutScalar> Out;
typedef cutlass::complex<InScalar> In;
static Out cast(In in) {
return Out(in);
}
};
template<typename Out, typename In>
Out cast_if_scalar(In in) {
return CastIfScalar<Out, In>::cast(in);
}
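// ---------------------------------------------------------------------------
// Illustrative note (added for exposition): cast_if_scalar promotes a
// real-valued operand into a complex compute type while passing complex
// operands through unchanged, e.g.
//   cast_if_scalar<cutlass::complex<float>>(1.5f)
//     -> cutlass::complex<float>(1.5f, 0.0f)
//   cast_if_scalar<cutlass::complex<float>>(cutlass::complex<float>(1, 2))
//     -> cutlass::complex<float>(1, 2)
// ---------------------------------------------------------------------------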
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b = tensor_b.at(MatrixCoord(k_block, col));
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
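// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition, not part of the original
// header): computing a single-precision reference GEMM into host tensors.
// The function name is hypothetical; HostTensor and layout::ColumnMajor are
// assumed to be available via the includes above.
// ---------------------------------------------------------------------------
inline void compute_gemm_sketch(int m, int n, int k) {
  using Layout = cutlass::layout::ColumnMajor;

  cutlass::HostTensor<float, Layout> A({m, k});
  cutlass::HostTensor<float, Layout> B({k, n});
  cutlass::HostTensor<float, Layout> C({m, n});
  cutlass::HostTensor<float, Layout> D({m, n});

  // ... fill A, B, and C on the host before calling the reference ...

  // D = 1.0f * (A x B) + 0.0f * C, accumulated in float starting from zero.
  compute_gemm<float, Layout, float, Layout, float, Layout, float, float>(
    {m, n, k}, 1.0f, A.host_ref(), B.host_ref(), 0.0f,
    C.host_ref(), D.host_ref(), float(0));
}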
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
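// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition, not part of the original
// header): dispatching through the Gemm functor with fp16 inputs and fp32
// accumulation. The function name and tensor references are assumptions.
// ---------------------------------------------------------------------------
inline void gemm_functor_sketch(
  cutlass::gemm::GemmCoord problem_size,
  cutlass::TensorRef<cutlass::half_t, cutlass::layout::RowMajor>    ref_A,
  cutlass::TensorRef<cutlass::half_t, cutlass::layout::ColumnMajor> ref_B,
  cutlass::TensorRef<float, cutlass::layout::ColumnMajor>           ref_C,
  cutlass::TensorRef<float, cutlass::layout::ColumnMajor>           ref_D) {

  // Selects the multiply-add specialization defined above.
  Gemm<
    cutlass::half_t, cutlass::layout::RowMajor,
    cutlass::half_t, cutlass::layout::ColumnMajor,
    float, cutlass::layout::ColumnMajor,
    float, float,
    cutlass::arch::OpMultiplyAdd> gemm_op;

  // D = 1.0f * (A x B) + 0.0f * C
  gemm_op(problem_size, 1.0f, ref_A, ref_B, 0.0f, ref_C, ref_D, float(0));
}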
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddFastBF16> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddSaturate> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpXorPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, xor_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, xor_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
/// Partial specialization for AND-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpAndPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, and_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, and_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddFastF32> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c,
AccumulatorType initial_accum) {
typename TensorRefCollectionA::ConstIterator tensor_a_it = tensor_a.begin();
typename TensorRefCollectionB::ConstIterator tensor_b_it = tensor_b.begin();
typename TensorRefCollectionC::ConstIterator tensor_c_it = tensor_c.begin();
for (int batch = 0;
batch < batch_count;
++batch, ++tensor_a_it, ++tensor_b_it, ++tensor_c_it) {
Gemm<typename TensorRefCollectionA::Element,
typename TensorRefCollectionA::Layout,
typename TensorRefCollectionB::Element,
typename TensorRefCollectionB::Layout,
typename TensorRefCollectionC::Element,
typename TensorRefCollectionC::Layout,
typename TensorRefCollectionC::Element,
typename TensorRefCollectionC::Element>
gemm;
gemm(problem_size, alpha, *tensor_a_it, *tensor_b_it, beta, *tensor_c_it,
initial_accum);
}
}
/// Computes a batch of GEMMs over a set of matrices of common dimension, using an initial
/// accumulator value of ScalarType(0).
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c) {
BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, ScalarType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/gemm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/gemm.h",
"repo_id": "tools",
"token_count": 8247
} | 80 |
var searchData=
[
['xor_5fadd',['xor_add',['../structcutlass_1_1xor__add.html',1,'cutlass']]]
];
| docs/search/all_17.js/0 | {
"file_path": "docs/search/all_17.js",
"repo_id": "docs",
"token_count": 48
} | 0 |
var searchData=
[
['xor_5fadd',['xor_add',['../structcutlass_1_1xor__add.html',1,'cutlass']]]
];
| docs/search/classes_15.js/0 | {
"file_path": "docs/search/classes_15.js",
"repo_id": "docs",
"token_count": 48
} | 1 |
var searchData=
[
['kernellaunchconfiguration',['KernelLaunchConfiguration',['../structcutlass_1_1KernelLaunchConfiguration.html',1,'cutlass']]]
];
| docs/search/classes_9.js/0 | {
"file_path": "docs/search/classes_9.js",
"repo_id": "docs",
"token_count": 48
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization example
*/
#pragma once
#include <algorithm>
#include <stdexcept>
#include <vector>
#include "cutlass/coord.h"
#include "cutlass/util/reference/host/tensor_foreach.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
std::ostream &operator<<(std::ostream &out, std::vector<T> const &vec) {
auto it = vec.begin();
if (it != vec.end()) {
out << *it;
for (++it; it != vec.end(); ++it) {
out << ", " << *it;
}
}
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying static-length vectors into dynamic vectors
template <typename TensorCoord, int Rank>
struct coord_to_vector {
coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) {
vec.at(Rank - 1) = coord[Rank - 1];
coord_to_vector<TensorCoord, Rank - 1>(vec, coord);
}
};
/// Permits copying static-length vectors into dynamic vectors
template <typename TensorCoord>
struct coord_to_vector<TensorCoord, 1> {
coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) {
vec.at(0) = coord[0];
}
};
/// Permits copying static-length vectors into dynamic vectors
template <typename TensorCoord>
struct coord_to_vector<TensorCoord, 0> {
coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) {
}
};
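// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition): round-tripping between a
// runtime std::vector<int> and a static-rank cutlass::Coord. The function
// name and the rank-4 extent are arbitrary choices for the example.
// ---------------------------------------------------------------------------
inline void coord_copy_sketch() {
  std::vector<int> extent_vec = {4, 8, 16, 32};
  cutlass::Coord<4> extent_coord;

  // Recursively copies extent_vec[0..3] into extent_coord[0..3].
  vector_to_coord<cutlass::Coord<4>, 4>(extent_coord, extent_vec);

  // ... and back into a dynamically sized vector.
  std::vector<int> round_trip(4, 0);
  coord_to_vector<cutlass::Coord<4>, 4>(round_trip, extent_coord);
}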
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure representing an element in source memory
struct Element {
std::vector<int> coord; ///< logical coordinate of element (as vector)
int offset; ///< linear offset from source memory
int color; ///< enables coloring each element to indicate
/// Default ctor
inline Element(): offset(-1), color(0) { }
/// Construct from logical coordinate and initial offset
inline Element(
std::vector<int> const &coord_,
int offset_,
int color_ = 0
):
coord(coord_), offset(offset_), color(color_) { }
/// Returns true if element is in a defined state
inline bool valid() const {
return offset >= 0;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Visualizes memory layouts by constructing a 'shape'
template <typename Layout_>
class VisualizeLayout : public VisualizeLayoutBase {
public:
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using Stride = typename Layout::Stride;
public:
Options options;
Layout layout;
TensorCoord extent;
std::vector<Element> elements;
public:
/// Initializes the problem space
VisualizeLayout() {
}
/// visualization method
bool visualize(Options const &options_) {
options = options_;
if (options.extent.size() != TensorCoord::kRank) {
std::cerr
<< "--extent must have rank " << TensorCoord::kRank
<< " (given: " << options.extent.size() << ")" << std::endl;
return false;
}
vector_to_coord<TensorCoord, TensorCoord::kRank>(extent, options.extent);
// Construct the layout for a packed tensor
if (options.stride.empty()) {
layout = Layout::packed(extent);
}
else if (options.stride.size() != Stride::kRank) {
std::cerr
<< "--stride must have rank " << Stride::kRank
<< " (given: " << options.stride.size() << ")" << std::endl;
return false;
}
else {
      // Construct the stride from the command-line arguments
Stride stride;
vector_to_coord<Stride, Stride::kRank>(stride, options.stride);
layout = Layout(stride);
}
// Resize elements, setting elements to 'undefined' state
elements.resize(layout.capacity(extent));
// enumerate points in tensor space and assign
cutlass::reference::host::TensorForEachLambda(
extent,
[&](TensorCoord coord) {
std::vector<int> coord_vec(TensorCoord::kRank, 0);
coord_to_vector<TensorCoord, TensorCoord::kRank>(coord_vec, coord);
int offset = int(layout(coord));
if (offset >= int(elements.size())) {
std::cerr
<< "Layout error - " << coord_vec
<< " is out of range (computed offset: " << offset
<< ", capacity: " << elements.size() << std::endl;
throw std::out_of_range("(TensorForEach) layout error - coordinate out of range");
}
elements.at(offset) = Element(coord_vec, offset);
});
return true;
}
/// Verifies the layout satisfies vectorization requirements
bool verify(bool verbose, std::ostream &out) {
return true;
}
private:
/// returns a pair (is_vectorizable, one_changing_rank) to determine if a
/// vector exists (consecutive logical coordinates or uniformly invalid)
/// at the given location.
std::pair< bool, int > _is_vectorizable(int i) const {
// (all elements are invalid) or
// (all elements are valid AND
// exactly one rank is changing AND
// elements are consecutive)
// Don't need vectorization.
if (options.vectorize <= 2) return std::make_pair(false, -1);
// Boundary check.
if (i > int(elements.size()) || (i + options.vectorize - 1) > int(elements.size()))
return std::make_pair(false, -1);
// Check if either all elements are valid or invalid.
bool all_elements_invalid = std::all_of(
elements.begin() + i, elements.begin() + i + options.vectorize,
[](Element const &e) { return !e.valid(); });
bool all_elements_valid = std::all_of(
elements.begin() + i, elements.begin() + i + options.vectorize,
[](Element const &e) { return e.valid(); });
if (!all_elements_invalid && !all_elements_valid)
return std::make_pair(false, -1);
// From here, it is vectorizable.
if (all_elements_invalid) return std::make_pair(true, -1);
// Check if only exactly one rank is changing.
int one_changing_rank = -1;
for (int j = 0; j < options.vectorize; ++j) {
for (int r = 0; r < TensorCoord::kRank; ++r) {
if (elements.at(i + j).coord.at(r) != elements.at(i).coord.at(r)) {
if (one_changing_rank == -1) {
one_changing_rank = r;
} else if (one_changing_rank != r) {
return std::make_pair(false, -1);
}
}
}
}
return std::make_pair(true, one_changing_rank);
}
/// Prints a vector of elements
void _print_vector(std::ostream &out, int i, int one_changing_rank) {
Element const &base_element = elements.at(i);
if (base_element.valid()) {
out << "(";
for (int r = 0; r < TensorCoord::kRank; ++r) {
if (r) {
out << ", ";
}
if (r == one_changing_rank) {
out
<< base_element.coord.at(r)
<< ".."
<< (base_element.coord.at(r) + options.vectorize - 1);
}
else {
out << base_element.coord.at(r);
}
}
out << ")";
}
else {
out << " ";
}
}
/// Prints a single element
void _print_element(std::ostream &out, int k) {
Element const &element = elements.at(k);
if (element.valid()) {
out << "(";
for (int v = 0; v < TensorCoord::kRank; ++v) {
out << (v ? ", " : "") << element.coord.at(v);
}
out << ")";
}
else {
out << " ";
}
}
public:
/// Pretty-prints the layout to the console
void print_csv(std::ostream &out, char delim = '|', char new_line = '\n') {
int row = -1;
for (int i = 0; i < int(elements.size()); i += options.vectorize) {
if (i % options.output_shape.at(0)) {
out << delim;
}
else {
if (row >= 0) {
out << new_line;
}
++row;
if (row == options.output_shape.at(1)) {
out << new_line;
row = 0;
}
}
auto is_vector = _is_vectorizable(i);
if (is_vector.first) {
_print_vector(out, i, is_vector.second); // print a vector starting at element i
}
else {
for (int j = 0; j < options.vectorize; ++j) { // print individual elements [i..i+j)
_print_element(out, i + j);
}
}
}
out << new_line << std::flush;
}
/// Help message
virtual std::ostream &print_help(std::ostream &out) {
out << "TensorCoord rank " << TensorCoord::kRank << ", Stride rank: " << Stride::kRank;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/03_visualize_layout/visualize_layout.h/0 | {
"file_path": "examples/03_visualize_layout/visualize_layout.h",
"repo_id": "examples",
"token_count": 4166
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Planar Complex Array Example
This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels which
execute a batch of matrix products, loading problem sizes and matrix base pointers from arrays
in global memory.
These kernels represent complex matrices by storing the real and imaginary parts of the matrix in
disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts
as either column-major or row-major layouts with a single leading dimension indicating the stride
between columns or rows.
The CUTLASS Library collects multiple template instantiations in a data structure and offers
a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures.
CUTLASS decouples matrix layout from complex transformation, so four possible transformations
are possible on the A and B operands:
n: column-major
c: column-major complex conjugate
t: row-major
h: row-major complex conjugate
To build strictly the planar complex kernels needed for general application, execute the following
CMake command in an empty build directory.
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex
This builds all planar complex GEMM variants for Volta and Turing architectures.
To build strictly the kernels needed for this example, an even narrower filter string may be
specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for
the 'CN' layout configuration (conjugate A operand with both A and B as column-major).
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_array_f16*cn
$ make 11_planar_complex_array
$ ./examples/11_planar_complex_array/11_planar_complex_array --m=2048 --n=1024 --k=512 --batch=10
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/gemm_planar_complex.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::complex<float> alpha;
cutlass::complex<float> beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({1024, 1024, 1024}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch", batch_count);
cmd.get_cmd_line_argument("alpha", alpha.real());
cmd.get_cmd_line_argument("alpha_i", alpha.imag());
cmd.get_cmd_line_argument("beta", beta.real());
cmd.get_cmd_line_argument("beta_i", beta.imag());
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "11_planar_complex_array example\n\n"
<< " This example uses the CUTLASS Library to execute Planar Complex Array GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --batch=<int> Number of GEMM operations executed in one batch\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --alpha_i=<f32> Epilogue scalar alpha (imaginary part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n\n"
<< " --beta_i=<f32> Epilogue scalar beta (imaginary part)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/11_planar_complex_array/11_planar_complex_array\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count * 4;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
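// ---------------------------------------------------------------------------
// Worked example (added for exposition): for the default 1024x1024x1024
// problem with --batch=1, the planar complex GEMM performs
//   1024^3 * 1 * 4 = 4,294,967,296 real multiply-adds,
// i.e. about 8.59 GFLOP at two flops per multiply-add, so a 1 ms average
// runtime corresponds to roughly 8,590 GFLOP/s.
// ---------------------------------------------------------------------------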
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performance test environment for planar complex
class TestbedPlanarComplex {
public:
// Half-precision input and output
using Element = cutlass::half_t;
// Configurations for layouts and internal computation
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementCompute = float;
using ElementAccumulator = float;
//
// Data members
//
cutlass::library::Handle handle;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::DeviceAllocation<Element> tensor_A;
cutlass::DeviceAllocation<Element> tensor_B;
cutlass::DeviceAllocation<Element> tensor_C;
cutlass::DeviceAllocation<Element> tensor_D;
cutlass::DeviceAllocation<Element> tensor_D_ref;
cutlass::DeviceAllocation<void *> ptr_A_real;
cutlass::DeviceAllocation<void *> ptr_A_imag;
cutlass::DeviceAllocation<void *> ptr_B_real;
cutlass::DeviceAllocation<void *> ptr_B_imag;
cutlass::DeviceAllocation<void *> ptr_C_real;
cutlass::DeviceAllocation<void *> ptr_C_imag;
cutlass::DeviceAllocation<void *> ptr_D_real;
cutlass::DeviceAllocation<void *> ptr_D_imag;
//
// Methods
//
TestbedPlanarComplex(
Options const &options
):
problem_size(options.problem_size), batch_count(options.batch_count) {
// Allocate device memory for batched planar complex GEMM
tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2);
tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2);
tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
ptr_A_real.reset(batch_count);
ptr_A_imag.reset(batch_count);
ptr_B_real.reset(batch_count);
ptr_B_imag.reset(batch_count);
ptr_C_real.reset(batch_count);
ptr_C_imag.reset(batch_count);
ptr_D_real.reset(batch_count);
ptr_D_imag.reset(batch_count);
}
void initialize() {
uint64_t seed = 1073;
// Use small integers to simplify correctness checking
int scope_max = 6;
int scope_min = -6;
cutlass::reference::device::BlockFillRandomUniform(
tensor_A.get(), tensor_A.size(), seed, Element(scope_max), Element(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_B.get(), tensor_B.size(), seed * 2019, Element(scope_max), Element(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_C.get(), tensor_C.size(), seed * 2020, Element(scope_max), Element(scope_min), 0);
}
Result profile(Options const &options) {
Result result;
initialize();
Element *ptr_A = tensor_A.get();
Element *ptr_B = tensor_B.get();
Element *ptr_C = tensor_C.get();
Element *ptr_D = tensor_D.get();
int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2;
int64_t batch_stride_B = int64_t(problem_size.k()) * problem_size.n() * 2;
int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2;
int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2;
typename LayoutA::Stride::Index lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0);
typename LayoutB::Stride::Index ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0);
typename LayoutC::Stride::Index ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
typename LayoutC::Stride::Index ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k();
int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n();
int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n();
int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n();
//
// Configure pointers in global memory
//
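    // Planar complex storage: within each batch element, the real part spans
    // the first imag_stride_X elements of the allocation and the imaginary
    // part follows immediately, so ptr_imag = ptr_real + imag_stride_X.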
struct {
Element *base;
void **ptr_real;
void **ptr_imag;
int64_t batch_stride;
int64_t imag_stride;
} tensors[] = {
{ tensor_A.get(), ptr_A_real.get(), ptr_A_imag.get(), batch_stride_A, imag_stride_A},
{ tensor_B.get(), ptr_B_real.get(), ptr_B_imag.get(), batch_stride_B, imag_stride_B},
{ tensor_C.get(), ptr_C_real.get(), ptr_C_imag.get(), batch_stride_C, imag_stride_C},
{ tensor_D.get(), ptr_D_real.get(), ptr_D_imag.get(), batch_stride_D, imag_stride_D}
};
for (auto const &tensor : tensors) {
for (int idx = 0; idx < batch_count; ++idx) {
void *ptr_real = tensor.base + idx * tensor.batch_stride;
void *ptr_imag = tensor.base + idx * tensor.batch_stride + tensor.imag_stride;
cudaError_t error = cudaMemcpy(
tensor.ptr_real + idx,
&ptr_real,
sizeof(void *),
cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to copy pointer to device memory");
}
error = cudaMemcpy(
tensor.ptr_imag + idx,
&ptr_imag,
sizeof(void *),
cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to copy pointer to device memory");
}
}
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
//
// Execute the planar complex array GEMM kernel via the CUTLASS Library's
// dispatch routines.
//
// Note, for planar complex array GEMM kernels, all numeric type arguments
// specify the data type of the base real types. These are understood to
// apply to planar complex representations of matrices in memory and to complex<T>
// structures for scalars.
//
// See tools/library/include/cutlass/library/handle.h for more details.
//
result.status = handle.gemm_planar_complex_array(
problem_size.m(), // expected GEMM M dimension
problem_size.n(), // expected GEMM N dimension
problem_size.k(), // expected GEMM K dimension
batch_count, // Number of batched elements
nullptr,
nullptr,
nullptr,
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars
&options.alpha, // Pointer to alpha scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix
cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand
ptr_A_real.get(), // Pointer to array of pointers to real part of A matrix
ptr_A_imag.get(), // Pointer to array of pointers to imaginary part of A matrix
lda, // Leading dimension of real part of A matrix
lda, // Leading dimension of imaginary part of A matrix
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix
cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand
ptr_B_real.get(), // Pointer to array of pointers to real part of B matrix
ptr_B_imag.get(), // Pointer to array of pointers to imaginary part of B matrix
ldb, // Leading dimension of real part of B matrix
ldb, // Leading dimension of imaginary part of B matrix
&options.beta, // Pointer to beta scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices
ptr_C_real.get(), // Pointer to array of pointers to real part of C matrix
ptr_C_imag.get(), // Pointer to array of pointers to imaginary part of C matrix
ldc, // Leading dimension of real part of C matrix
ldc, // Leading dimension of imaginary part of C matrix
ptr_D_real.get(), // Pointer to array of pointers to real part of D matrix
ptr_D_imag.get(), // Pointer to array of pointers to imaginary part of D matrix
ldd, // Leading dimension of real part of D matrix
ldd // Leading dimension of imaginary part of D matrix
);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS internal error - configuration not supported" << std::endl;
return result;
}
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
if (handle.get_last_operation()) {
std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl;
}
//
// Compute reference in device code
//
if (options.reference_check) {
result.passed = true;
for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) {
cutlass::reference::device::GemmPlanarComplex<
Element, LayoutA,
Element, LayoutB,
Element, LayoutC,
ElementAccumulator
>(
problem_size,
options.alpha,
{tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A},
cutlass::ComplexTransform::kConjugate,
{tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B},
cutlass::ComplexTransform::kNone,
options.beta,
{tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C},
{tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D}
);
Element epsilon = 0.1_hf;
Element nonzero_floor = 0.1_hf;
result.passed = cutlass::reference::device::BlockCompareRelativelyEqual(
tensor_D.get() + idx * batch_stride_D,
tensor_D_ref.get() + idx * batch_stride_D,
batch_stride_D,
epsilon,
nonzero_floor
);
}
if (result.passed) {
std::cout << "Reference check passed." << std::endl;
}
else {
std::cerr << "Error - reference check failed." << std::endl;
}
}
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
// Volta Tensor Core operations are first available in CUDA 10.1 Toolkit.
//
// Turing Tensor Core operations are first available in CUDA 10.2 Toolkit.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 7) {
std::cerr << "Tensor Core operations must be run on a machine with compute capability at least 70."
<< std::endl;
    // Return zero so the example passes on older architectures, where it is a no-op.
return 0;
}
else if (props.major == 7 && props.minor <= 2) {
//
// If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
      // Return zero so the example passes on older Toolkits, where it is a no-op.
return 0;
}
}
else if (props.major == 7 && props.minor >= 5) {
//
// If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
// Returning zero so this passes on older Toolkits. Its actions are a no-op.
return 0;
}
}
else {
// NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond.
//
// fall through
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
TestbedPlanarComplex testbed(options);
Result result = testbed.profile(options);
return result.passed ? 0 : -1;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/11_planar_complex_array/planar_complex_array.cu/0 | {
"file_path": "examples/11_planar_complex_array/planar_complex_array.cu",
"repo_id": "examples",
"token_count": 9173
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "kernel/b2b_implicit_gemm_convolution.h"
#include "threadblock/b2b_implicit_gemm_pipelined.h"
#include "threadblock/b2b_implicit_gemm_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
bool SmemAccumulator = false
> struct DefaultB2bConv2dFprop;
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop.h",
"repo_id": "examples",
"token_count": 1110
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped back-to-back (B2B) fused GEMM mainloop.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute a threadblock-scoped fused back-to-back matrix product using a
/// multistage cp.async pipeline targeting Tensor Core operations (SM80 and later).
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA0_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA0_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA0,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB0,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile
// (concept::MmaTensorOpFragmentIterator)
typename FragmentIteratorA1_,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: VectorIterator)
typename IteratorAccumulatorScaleBias_,
/// WarpIterator to load Scale or Bias vector from threadblock fragment
typename FragmentIteratorA1ScaleBias_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB1,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...)
typename OutputOp_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaMultistage :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
///< Iterates over tiles of A operand in global memory
using IteratorA0 = IteratorA0_;
using IteratorA = IteratorA0;
///< Iterates over tiles of B operand in global memory
using IteratorB0 = IteratorB0_;
using IteratorB = IteratorB0;
///< Policy describing tuning details
using Policy0 = Policy0_;
using SmemIteratorA0 = SmemIteratorA0_;
using SmemIteratorB0 = SmemIteratorB0_;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape1 = Shape1_;
///< Iterates over intermediate accumulator tile
using FragmentIteratorA1 = FragmentIteratorA1_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_;
///< WarpIterator to load Scale or Bias vector from threadblock fragment
using FragmentIteratorA1ScaleBias = FragmentIteratorA1ScaleBias_;
///< Iterates over tiles of B operand in global memory
using IteratorB1 = IteratorB1_;
///< Policy describing tuning details
using Policy1 = Policy1_;
///< Export Policy0 as the threadblock-level Mma's policy
using Policy = Policy0;
using Shape = Shape0;
using SmemIteratorB1 = SmemIteratorB1_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Epilogue after 1st Gemm
using OutputOp = OutputOp_;
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
static cutlass::arch::CacheOperation::Kind const kCacheOpA0 = CacheOpA0;
static cutlass::arch::CacheOperation::Kind const kCacheOpB0 = CacheOpB0;
static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC0 = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
/// Fragment of Scale and Bias loaded from global memory
using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment;
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA0 = Operator0::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
/// Complex transform on B operand
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Complex transform exports needed by higher-level kernels
static ComplexTransform const kTransformA = kTransformA0;
static ComplexTransform const kTransformB = kTransformB0;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations0 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert(Base::kWarpGemmIterations1 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const TBLoadIterationsA0 =
IteratorA0::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB0 =
IteratorB0::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB1 =
IteratorB1::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA0 =
(TBLoadIterationsA0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB0 =
(TBLoadIterationsB0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB1 =
(TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
};
private:
using WarpLoadedFragmentA0 = typename Operator0::FragmentA;
using WarpLoadedFragmentB0 = typename Operator0::FragmentB;
/// Warp Fragment of operand A1 loaded from accumulator tile
using WarpLoadedFragmentA1 = typename FragmentIteratorA1::Fragment;
using WarpLoadedFragmentA1ScaleBias =
typename FragmentIteratorA1ScaleBias::Fragment;
using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
using WarpTransformedFragmentA0 = typename Operator0::TransformedFragmentA;
using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB;
using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA;
using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA0 smem_iterator_A0_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx,
///< GEMM0 N is used for accumulator extent
int problem_size_0_n
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), thread_idx),
smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx),
smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A0_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations0 * warp_idx_k});
this->warp_tile_iterator_B0_.add_tile_offset(
{Base::kWarpGemmIterations0 * warp_idx_k, warp_idx_n});
this->warp_tile_iterator_B1_.add_tile_offset(
{Base::kWarpGemmIterations1 * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance_0(IteratorA0 &iterator_A0, IteratorB0 &iterator_B0,
int group_start_A0 = 0, int group_start_B0 = 0) {
iterator_A0.set_iteration_index(group_start_A0 *
IteratorA0::kAccessesPerVector);
this->smem_iterator_A0_.set_iteration_index(group_start_A0);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA0; ++j) {
if (group_start_A0 + j < Detail::TBLoadIterationsA0) {
typename IteratorA0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA0::AccessType *>(
this->smem_iterator_A0_.get());
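// Bytes per cp.async transaction: bits per element times elements per access,
// split across the vector accesses, then converted from bits to bytes.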
int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value *
IteratorA0::ThreadMap::kElementsPerAccess /
IteratorA0::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A0.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA0>(
dst_ptr + v, gmem_ptr, iterator_A0.valid());
++iterator_A0;
}
++this->smem_iterator_A0_;
}
}
iterator_B0.set_iteration_index(group_start_B0 *
IteratorB0::kAccessesPerVector);
this->smem_iterator_B0_.set_iteration_index(group_start_B0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB0; ++j) {
if (group_start_B0 + j < Detail::TBLoadIterationsB0) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B0.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB0>(
dst_ptr + v, gmem_ptr, iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
}
}
CUTLASS_DEVICE
void copy_tiles_and_advance_1(IteratorB1 &iterator_B1,
int group_start_B1 = 0) {
iterator_B1.set_iteration_index(group_start_B1 *
IteratorB1::kAccessesPerVector);
this->smem_iterator_B1_.set_iteration_index(group_start_B1);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) {
if (group_start_B1 + j < Detail::TBLoadIterationsB1) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B1.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB1>(
dst_ptr + v, gmem_ptr, iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations_0,
///< destination accumulator tile
FragmentC1 &accum,
///< iterator over A0 operand in global memory
IteratorA0 iterator_A0,
///< iterator over B0 operand in global memory
IteratorB0 iterator_B0,
///< iterator over A1 operand scale vector in global memory
IteratorAccumulatorScaleBias iterator_A1_scale,
///< iterator over A1 operand bias vector in global memory
IteratorAccumulatorScaleBias iterator_A1_bias,
///< iterator over B1 operand in global memory
IteratorB1 iterator_B1,
///< initial value of accumulator
FragmentC0 const &src_accum,
///< epilogue operation after 1st Gemm
OutputOp output_op_0)
{
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations_0) {
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
iterator_A0.set_iteration_index(0);
this->smem_iterator_A0_.set_iteration_index(0);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsA0; ++j) {
typename IteratorA0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA0::AccessType *>(
this->smem_iterator_A0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA0::Element>::value *
IteratorA0::ThreadMap::kElementsPerAccess /
IteratorA0::kAccessesPerVector / 8;
int src_bytes = (iterator_A0.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>(
dst_ptr + v, iterator_A0.get(), iterator_A0.valid());
++iterator_A0;
}
++this->smem_iterator_A0_;
}
iterator_B0.set_iteration_index(0);
this->smem_iterator_B0_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB0; ++j) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>(
dst_ptr + v, iterator_B0.get(), iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
// Move to the next stage
iterator_A0.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
this->smem_iterator_A0_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
FragmentC0 accum0 = src_accum;
// DEPBAR+SYNC
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA0 warp_loaded_frag_A0[2];
WarpLoadedFragmentB0 warp_loaded_frag_B0[2];
WarpTransformedFragmentA0 warp_transformed_frag_A0[2];
WarpTransformedFragmentB0 warp_transformed_frag_B0[2];
Operator0 warp_mma0;
this->warp_tile_iterator_A0_.set_kgroup_index(0);
this->warp_tile_iterator_B0_.set_kgroup_index(0);
this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[0]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
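// The prologue filled stages [0, kStages-2] of the shared-memory circular buffer, so the
// next write targets the last stage while warp-level reads begin at stage 0.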
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma0.transform(warp_transformed_frag_A0[0], warp_transformed_frag_B0[0],
warp_loaded_frag_A0[0], warp_loaded_frag_B0[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations_0 > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
if (warp_mma_k > 0)
warp_mma0.transform(warp_transformed_frag_A0[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
warp_loaded_frag_A0[warp_mma_k % 2],
warp_loaded_frag_B0[warp_mma_k % 2]);
warp_mma0(
accum0,
warp_transformed_frag_A0[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
accum0
);
// Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations0 - 1) {
int group_start_iteration_A0, group_start_iteration_B0;
group_start_iteration_A0 = warp_mma_k * Detail::kAccessesPerGroupA0;
group_start_iteration_B0 = warp_mma_k * Detail::kAccessesPerGroupB0;
copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0,
group_start_iteration_B0);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations0) {
int group_start_iteration_A0, group_start_iteration_B0;
group_start_iteration_A0 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA0;
group_start_iteration_B0 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB0;
copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0,
group_start_iteration_B0);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A0.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
this->smem_iterator_A0_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A0_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A0_.add_tile_offset(
{0, -Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations0});
this->warp_tile_iterator_B0_.add_tile_offset(
{-Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations0,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations_0;
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations0)
warp_mma0.transform(warp_transformed_frag_A0[(warp_mma_k + 1) % 2],
warp_transformed_frag_B0[(warp_mma_k + 1) % 2],
warp_loaded_frag_A0[(warp_mma_k + 1) % 2],
warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
}
}
// Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
// 2nd Gemm
/// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile
FragmentIteratorA1 warp_tile_iterator_A1_(accum0);
FragmentA1ScaleBias tb_frag_A1_scale;
FragmentA1ScaleBias tb_frag_A1_bias;
FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale);
FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias);
if(PerChannelScale) {
tb_frag_A1_scale.clear();
iterator_A1_scale.load(tb_frag_A1_scale);
++iterator_A1_scale;
}
tb_frag_A1_bias.clear();
iterator_A1_bias.load(tb_frag_A1_bias);
++iterator_A1_bias;
//
// Prologue
//
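// Number of warp-level k-iterations for the second GEMM: the accumulator fragment iterations
// of the first GEMM, rounded up to whole warp-GEMM groups.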
int gemm_k_iterations_1 = (FragmentIteratorA1::Policy::kIterations + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations_1) {
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
iterator_B1.set_iteration_index(0);
this->smem_iterator_B1_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// DEPBAR+SYNC
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA1 warp_loaded_frag_A1[2];
WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_scale[2];
WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_bias[2];
WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
WarpTransformedFragmentA1 warp_transformed_frag_A1[2];
WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
Operator1 warp_mma1;
if(PerChannelScale) {
warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]);
++warp_tile_iterator_A1_scale_;
}
warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[0]);
++warp_tile_iterator_A1_bias_;
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0],
warp_loaded_frag_A1_scale[0],
warp_loaded_frag_A1_bias[0],
output_op_0);
++warp_tile_iterator_A1_;
this->warp_tile_iterator_B1_.set_kgroup_index(0);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]);
++this->warp_tile_iterator_B1_;
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
smem_write_stage_idx = Base::kStages - 1;
smem_read_stage_idx = 0;
warp_mma1.transform(warp_transformed_frag_A1[0], warp_transformed_frag_B1[0],
warp_loaded_frag_A1[0], warp_loaded_frag_B1[0]);
//
// Mainloop
//
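// The prologue above already issued (kStages - 1) stages, so exclude them from the mainloop
// trip count.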
gemm_k_iterations_1 = (FragmentIteratorA1::Policy::kIterations + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1 - (Base::kStages - 1);
CUTLASS_PRAGMA_UNROLL
for (; gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1;
++warp_mma_k) {
// Load threadblock-level scale/bias vector from global memory
if (warp_mma_k + 1 == Base::kWarpGemmIterations1) {
if(PerChannelScale) {
tb_frag_A1_scale.clear();
iterator_A1_scale.load(tb_frag_A1_scale);
++iterator_A1_scale;
}
tb_frag_A1_bias.clear();
iterator_A1_bias.load(tb_frag_A1_bias);
++iterator_A1_bias;
}
// Load warp-level scale bias fragment from threadblock scale/bias vector
if(PerChannelScale) {
warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]);
++warp_tile_iterator_A1_scale_;
}
warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2]);
++warp_tile_iterator_A1_bias_;
// Load warp-level tile from accumulator fragment
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2],
output_op_0);
++warp_tile_iterator_A1_;
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B1_;
if (warp_mma_k > 0)
warp_mma1.transform(warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
warp_loaded_frag_A1[warp_mma_k % 2],
warp_loaded_frag_B1[warp_mma_k % 2]);
warp_mma1(
accum,
warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
accum
);
// Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations1 - 1) {
int group_start_iteration_B1;
group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1;
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations1) {
int group_start_iteration_B1;
group_start_iteration_B1 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB1;
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_B1_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations1,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
iterator_B1.clear_mask(gemm_k_iterations_1 == 1);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations1)
warp_mma1.transform(warp_transformed_frag_A1[(warp_mma_k + 1) % 2],
warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
}
}
// Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage.h",
"repo_id": "examples",
"token_count": 15041
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
data types in Tensor Cores. One big advantage is that F32 data can be loaded and converted
implicitly to TF32 inside the SYMM kernel, which means no source change is needed to accelerate
traditional F32 workloads on the NVIDIA Ampere architecture.
We can use the tf32 mode of tensor core to emulate a fast accurate SYMM kernel which is accelerated
using Ampere Tensor Cores (see include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h).
The trick is very simple
a x b = (a_big + a_small) x (b_big + b_small) = a_big x b_big + a_big x b_small + a_small x b_big
big = convert_to_tf32(F32)
small = convert_to_tf32(F32 - big)
a_small x b_small is discarded because they are too small.
This example demonstrates usage of this kernel, along with accuracy measurements w.r.t. actual F32
results (SSYMM from cuBLAS) and against F64 results (DSYMM from CUTLASS)
To enable this feature, the only change needed is to switch the default OpMultiplyAdd to
OpMultiplyAddFastF32.
Now, we have two different flavors of SSYMM in the profiler for Ampere:
s1688symm // Use 3xTF32 to emulate F32. F32 in, converted in TF32-big and TF32-small internally,
// accumulated in F32, F32 out.
s1688tf32symm // Use 1xTF32. F32 in, converted to one TF32 internally, accumulated in F32, F32 out.
*/
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/blas3.h"
#include "cutlass/gemm/device/symm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/symm.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
#if CUTLASS_ENABLE_CUBLAS
#include <cublas_v2.h>
#endif
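/////////////////////////////////////////////////////////////////////////////////////////////////
// A minimal host-side sketch of the 3xTF32 decomposition described in the file-level comment
// above, shown for illustration only; it is not used by the kernels in this example. It assumes
// cutlass::tfloat32_t is pulled in by the CUTLASS headers included above, and the helper name
// below is arbitrary.
static inline float product_3xtf32_sketch(float a, float b) {
  float a_big   = float(cutlass::tfloat32_t(a));          // big   = convert_to_tf32(F32)
  float a_small = float(cutlass::tfloat32_t(a - a_big));  // small = convert_to_tf32(F32 - big)
  float b_big   = float(cutlass::tfloat32_t(b));
  float b_small = float(cutlass::tfloat32_t(b - b_big));
  // a_small x b_small is discarded because it is too small to matter.
  return a_big * b_big + a_big * b_small + a_small * b_big;
}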
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
float alpha;
float beta;
std::string rand_mode;
int seed;
Options():
help(false),
problem_size({4096, 4096, 4096}),
alpha(1),
beta(),
rand_mode("uniform"),
seed(1) { }
bool valid() {
//
// CUTLASS attempts to load 128b vectors of F32 elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 4 elements.
//
int const kAlignment = 4;
if ((problem_size.m() % kAlignment) ||
(problem_size.n() % kAlignment) ||
(problem_size.k() % kAlignment)) {
// misaligned tensors
return false;
}
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
// Since the kernels in this example run in Left Side mode, the K dimension equals M
cmd.get_cmd_line_argument("m", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("seed", seed);
cmd.get_cmd_line_argument("rand_mode", rand_mode);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "33_ampere_3xtf32_tensorop_symm example\n\n"
<< " This example uses the CUTLASS Library to execute 3xTF32 tensorop SYMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> SYMM M dimension\n"
<< " --n=<int> SYMM N dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --rand_mode=<string> gauss / uniform*\n\n"
<< " --seed=<int> Random number seed (1*)\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/33_ampere_3xtf32_tensorop_symm/33_ampere_3xtf32_tensorop_symm --m=1024 --n=512 \\\n"
<< " --alpha=2 --beta=1 \n\n";
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Matrix B and Matrix C (since that's what cuBLAS supports, CUTLASS supports Row Major too)
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;
// Symmetric Matrix A is in Left Side mode
constexpr cutlass::SideMode SideModeA = cutlass::SideMode::kLeft;
// Symmetric Matrix A is in Lower Filled mode
constexpr cutlass::FillMode FillModeA = cutlass::FillMode::kLower;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 64, 16>; // <- threadblock tile M = 128, N = 64, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 32, 16>; // <- warp tile M = 64, N = 32, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- default identity threadblock swizzle
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
float, // <- data type of output matrix
128 / cutlass::sizeof_bits<float>::value, // <- the number of elements per vectorized
// memory access. For float, 128 / 32 = 4
// elements. This becomes the vector width of
// math instructions in the epilogue too
float, // <- data type of accumulator
float>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
// Alignment
constexpr int Alignment = 4;
//
// CUTLASS Symm Operators (SSYM: Symm_3xTF32, Symm_1xTF32, DSYMM: Symm_F64)
//
// Symm_3xTF32
using Symm_3xTF32 = cutlass::gemm::device::Symm<
float,
LayoutInputA,
SideModeA,
FillModeA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
1, // Symmetric matrix is always align 1
Alignment,
false,
cutlass::arch::OpMultiplyAddFastF32>;
// Symm_1xTF32
using Symm_1xTF32 = cutlass::gemm::device::Symm<
float,
LayoutInputA,
SideModeA,
FillModeA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
1, // Symmetric matrix is always align 1
Alignment,
false,
cutlass::arch::OpMultiplyAdd>;
// Symm_F64
using Symm_F64 = cutlass::gemm::device::Symm<
double,
LayoutInputA,
SideModeA,
FillModeA,
double,
LayoutInputB,
double,
LayoutOutput,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 16, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4>;
bool run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
////////////////////////////////////////////////////////////////////////////////
/// 1. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N
if (options.rand_mode == "uniform") {
const float min = -1;
const float max = 1;
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_F32.host_view(),
options.seed,
double(max),
double(min)); // <- Fill matrix C on host with uniform-distribution random data
} else if (options.rand_mode == "gauss") {
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomGaussian(
tensor_a_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix A on host with gaussian-distribution random data
cutlass::reference::host::TensorFillRandomGaussian(
tensor_b_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix B on host with gaussian-distribution random data
cutlass::reference::host::TensorFillRandomGaussian(
tensor_c_F32.host_view(),
options.seed,
double(0),
double(5)); // <- Fill matrix C on host with gaussian-distribution random data
}
cutlass::reference::host::TensorFill(
tensor_d_F32.host_view()); // <- fill matrix D on host with zeros
// Copy data from host to GPU
tensor_a_F32.sync_device();
tensor_b_F32.sync_device();
tensor_c_F32.sync_device();
tensor_d_F32.sync_device();
////////////////////////////////////////////////////////////////////////////////
/// 2. Initialize F64 tensors, Output tensors and setup arguments
////////////////////////////////////////////////////////////////////////////////
// Symm F64 input operands (A, B, C)
cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N
// Symm output (D) for SYMM_3xTF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Symm output (D) for SYMM_1xTF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
// Symm output (D) for SYMM_F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N
#if CUTLASS_ENABLE_CUBLAS
// Symm output (D) for SYMM_cublasF32
cutlass::HostTensor<float, LayoutOutput> tensor_d_cublasF32(problem_size.mn()); // <- Create matrix D with dimensions M x N
#endif
// Copy values from the DP tensors
cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view());
#if CUTLASS_ENABLE_CUBLAS
cutlass::reference::host::TensorCopy(tensor_d_cublasF32.host_view(), tensor_d_F32.host_view());
#endif
// Copy data from host to GPU
tensor_a_F64.sync_device();
tensor_b_F64.sync_device();
tensor_c_F64.sync_device();
tensor_d_F64.sync_device();
tensor_d_3xTF32.sync_device();
tensor_d_1xTF32.sync_device();
#if CUTLASS_ENABLE_CUBLAS
tensor_d_cublasF32.sync_device();
#endif
// Initialize alpha and beta for dot product computation
float alpha = float(options.alpha);
float beta = float(options.beta);
// Batch count as 1
int batch_count = 1;
// Batch stride for A, when matrix A is in Left Side mode
int batch_stride_A = problem_size.m()*problem_size.m();
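// (In Left Side mode, A is an m-by-m symmetric matrix, hence m*m elements per batch.)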
////////////////////////////////////////////////////////////////////////////////
/// 3. Run 3xTF32 kernel
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Symm_3xTF32::Arguments arguments_3xtf32{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size, // <- problem size of matrix multiplication
batch_count, // <- batch count
{alpha, beta}, // <- tuple of alpha and beta
tensor_a_F32.device_data(), // <- reference to matrix A on device
tensor_b_F32.device_data(), // <- reference to matrix B on device
tensor_c_F32.device_data(), // <- reference to matrix C on device
tensor_d_3xTF32.device_data(), // <- reference to matrix D on device
batch_stride_A, // <- batch stride and ld for matrices
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_a_F32.layout().stride(0),
tensor_b_F32.layout().stride(0),
tensor_c_F32.layout().stride(0),
tensor_d_3xTF32.layout().stride(0)
};
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_3xtf32 = Symm_3xTF32::get_workspace_size(arguments_3xtf32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32);
// Instantiate CUTLASS kernel depending on templates
Symm_3xTF32 symm_op_3xtf32;
// Check the problem size is supported or not
cutlass::Status status_3xtf32 = symm_op_3xtf32.can_implement(arguments_3xtf32);
CUTLASS_CHECK(status_3xtf32);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_3xtf32 = symm_op_3xtf32.initialize(arguments_3xtf32, workspace_3xtf32.get());
CUTLASS_CHECK(status_3xtf32);
// Launch initialized CUTLASS kernel
status_3xtf32 = symm_op_3xtf32();
CUTLASS_CHECK(status_3xtf32);
tensor_d_3xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 4. Run 1xTF32 kernel
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Symm_1xTF32::Arguments arguments_1xtf32{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size, // <- problem size of matrix multiplication
batch_count, // <- batch count
{alpha, beta}, // <- tuple of alpha and beta
tensor_a_F32.device_data(), // <- reference to matrix A on device
tensor_b_F32.device_data(), // <- reference to matrix B on device
tensor_c_F32.device_data(), // <- reference to matrix C on device
tensor_d_1xTF32.device_data(), // <- reference to matrix D on device
batch_stride_A, // <- batch stride and ld for matrices
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_a_F32.layout().stride(0),
tensor_b_F32.layout().stride(0),
tensor_c_F32.layout().stride(0),
tensor_d_1xTF32.layout().stride(0)
};
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_1xtf32 = Symm_1xTF32::get_workspace_size(arguments_1xtf32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32);
// Instantiate CUTLASS kernel depending on templates
Symm_1xTF32 symm_op_1xtf32;
// Check the problem size is supported or not
cutlass::Status status_1xtf32 = symm_op_1xtf32.can_implement(arguments_1xtf32);
CUTLASS_CHECK(status_1xtf32);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_1xtf32 = symm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get());
CUTLASS_CHECK(status_1xtf32);
// Launch initialized CUTLASS kernel
status_1xtf32 = symm_op_1xtf32();
CUTLASS_CHECK(status_1xtf32);
tensor_d_1xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 5. Run F64 kernel
////////////////////////////////////////////////////////////////////////////////
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Symm_F64::Arguments arguments_f64{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size, // <- problem size of matrix multiplication
batch_count, // <- batch count
{double(options.alpha), double(options.beta)}, // <- tuple of alpha and beta
tensor_a_F64.device_data(), // <- reference to matrix A on device
tensor_b_F64.device_data(), // <- reference to matrix B on device
tensor_c_F64.device_data(), // <- reference to matrix C on device
tensor_d_F64.device_data(), // <- reference to matrix D on device
batch_stride_A, // <- batch stride and ld for matrices
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_a_F64.layout().stride(0),
tensor_b_F64.layout().stride(0),
tensor_c_F64.layout().stride(0),
tensor_d_F64.layout().stride(0)
};
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size_f64 = Symm_F64::get_workspace_size(arguments_f64);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_f64(workspace_size_f64);
// Instantiate CUTLASS kernel depending on templates
Symm_F64 symm_op_f64;
// Check the problem size is supported or not
cutlass::Status status_f64 = symm_op_f64.can_implement(arguments_f64);
CUTLASS_CHECK(status_f64);
// Initialize CUTLASS kernel with arguments and workspace pointer
status_f64 = symm_op_f64.initialize(arguments_f64, workspace_f64.get());
CUTLASS_CHECK(status_f64);
// Launch initialized CUTLASS kernel
status_f64 = symm_op_f64();
CUTLASS_CHECK(status_f64);
cudaDeviceSynchronize();
tensor_d_F64.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 6. Run cuBLAS SSYMM kernel
////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_CUBLAS
cublasStatus_t cublas_status;
cublasHandle_t handle;
cublas_status = cublasCreate(&handle);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "Failed to create cuBLAS handle." << std::endl;
return false;
}
cublas_status = cublasSsymm(
handle,
CUBLAS_SIDE_LEFT,
CUBLAS_FILL_MODE_LOWER,
problem_size.m(),
problem_size.n(),
static_cast<const float*>(&alpha),
static_cast<const float*>(tensor_a_F32.device_data()),
int(tensor_a_F32.layout().stride(0)),
static_cast<const float*>(tensor_b_F32.device_data()),
int(tensor_b_F32.layout().stride(0)),
static_cast<const float*>(&beta),
static_cast<float*>(tensor_d_cublasF32.device_data()),
int(tensor_d_cublasF32.layout().stride(0))
);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "cublasSsymm() failed." << std::endl;
cublasDestroy(handle);
return false;
}
cudaDeviceSynchronize();
cublasDestroy(handle);
tensor_d_cublasF32.sync_host();
#endif
////////////////////////////////////////////////////////////////////////////////
/// 7. Compute l2 norms
////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_CUBLAS
// l2 norm cuBLAS F32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_cublasF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_cublasF32_in_F64.host_view(), tensor_d_cublasF32.host_view());
double l2_norm_cublasf32_vs_f64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_cublasF32_in_F64.host_view(), tensor_d_F64.host_view());
#endif
// l2 norm 3xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view());
double l2_norm_3xtf32_vs_f64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm 1xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view());
double l2_norm_1xtf32_vs_f64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view());
#if CUTLASS_ENABLE_CUBLAS
// l2 norm 3xTF32 vs cuBLAS F32
double l2_norm_3xtf32_vs_cublasf32 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32.host_view(), tensor_d_cublasF32.host_view());
#endif
// l2 norm 3xTF32 vs 1xTF32
double l2_norm_3xtf32_vs_1xtf32 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32.host_view(), tensor_d_1xTF32.host_view());
///////////////////////////////////////////////////////////////////////////////
// Print kernel info and L2 norms
std::cout << "Problem Size: (" << problem_size.m() << "," << problem_size.n() << "," << problem_size.k() << ") "
<< "Alpha: " << alpha << "," << " Beta: " << beta << std::endl;
std::cout << std::fixed;
std::cout << "Normalized L2 norm of" << std::endl;
std::cout.precision(8);
std::cout << std::scientific
#if CUTLASS_ENABLE_CUBLAS
<< " - cuBLAS F32 error with F64 reference : " << l2_norm_cublasf32_vs_f64 << std::endl
#endif
<< " - 3xTF32 error with F64 reference : " << l2_norm_3xtf32_vs_f64 << std::endl
<< " - 1xTF32 error with F64 reference : " << l2_norm_1xtf32_vs_f64 << std::endl
#if CUTLASS_ENABLE_CUBLAS
<< " - 3xTF32 error with cuBLAS F32 reference : " << l2_norm_3xtf32_vs_cublasf32 << std::endl
#endif
<< " - 3xTF32 error with 1xTF32 reference : " << l2_norm_3xtf32_vs_1xtf32 << std::endl;
return true;
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
bool result = true;
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
result = run(options);
if (!result) return -1;
return 0;
}
| examples/33_ampere_3xtf32_tensorop_symm/ampere_3xtf32_tensorop_symm.cu/0 | {
"file_path": "examples/33_ampere_3xtf32_tensorop_symm/ampere_3xtf32_tensorop_symm.cu",
"repo_id": "examples",
"token_count": 14585
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GEMM Permute Example.
This example computes batched GEMM operations with output results permuted as reshaped tensors.
We provide a layout plugin as a flexible tool for users to add any customized input/output tensor permute operation,
or any other generalized global memory write-out address computation. To add a customized layout, add a new class
in include/cutlass/layout/permute.h.
In this example we use the Tensor4DPermuteBMM0213 layout to perform a batched GEMM with permute([0, 2, 1, 3]) applied
to the whole reshaped BMM output tensor, and the Tensor5DPermute20314 layout to perform a normal GEMM with
permute([2, 0, 3, 1, 4]) applied to the reshaped output matrix. The address computations are performed in
compute(col_init, row_init, stride_init, BMM_batch_idx), with {col_permute, row_permute, stride_permute} as the new
addresses after the permute op (see include/cutlass/layout/permute.h).
Tips:
1) Make sure to set batch_stride to zero for the BMM permute; the batched GEMM must also run in mode
cutlass::gemm::GemmUniversalMode::kBatched instead of kArray.
2) When the permute op touches the contiguous dimension (for example [0, 2, 3, 1] for a row-major matrix
or [1, 0, 2, 3] for a column-major one), Alignment must be set to 1 for the corresponding matrix.
If the last dimension is untouched, a larger Alignment such as 8 (as in this example) can be used.
Consequently, permute ops that leave the unit-stride dimension untouched are recommended for best performance.
Examples:
# Runs a batched GEMM with 96 batches
$ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96
# Runs a batched GEMM with 96 batches (with GEMM-K dimension equal to 1024)
$ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96 --k=1024 --verbose=true
# Execute batched GEMM and profile with NSight
$ nv-nsight-cu-cli ./examples/39_gemm_permute/39_gemm_permute --m=256 --n=192 --k=256 --verbose=true --iterations=1 --reference-check=false
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <map>
#include <unordered_map>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/layout/permute.h"
#include "layouts.h"
#include "permute_info.h"
/// Tensor4DPermuteBMM0213 --->
/// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimension as [B, M, N]) reshaped
/// as [B/D1, D1, M, N]. Then perform permute([0, 2, 1, 3]) on the corresponding whole BMM tensor.
int constexpr D1 = 12;
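// Illustrative mapping (not used directly by the code): a BMM output element at
// (batch b, row m, col n), with b = p * D1 + d, sits at reshaped coordinate
// [p, d, m, n]; permute([0, 2, 1, 3]) moves it to [p, m, d, n], i.e. to
// (row m, col d * N + n) of batch p in the resulting [B/D1, M, D1*N] view.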
/// Tensor5DPermute20314 --->
/// Permute layout function for 5-D permuted tensors with matrix (dimension as [M, N]) reshaped
/// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([2, 0, 3, 1, 4]) on the corresponding tensor.
int constexpr T1 = 16;
int constexpr T2 = 3;
int constexpr T3 = 8;
/// Tensor4DPermute0213 --->
/// Permute layout function for 4-D permuted tensors with matrix (dimension as [M, N]) reshaped
/// as [M/S1, S1, S2, N/S2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor.
int constexpr S1 = 8;
int constexpr S2 = 4;
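// Worked example (illustrative) with the constants above and the default problem
// size m=384, n=192, k=384, batch-count=96, for the output tensor D of extent [M, N] = [384, 192]:
// - Tensor4DPermute0213 (S1=8, S2=4): D is viewed as [M*S2/S1, N*S1/S2] = [192, 384]
// - Tensor5DPermute20314 (T1=16, T2=3, T3=8): D is viewed as [M*T2/T1, N*T1/T2] = [72, 1024]
// - Tensor4DPermuteBMM0213 (D1=12): batched D is viewed as [B/D1, M, N*D1] = [8, 384, 2304]
// These numbers also illustrate the divisibility requirements checked in Testbed::check_tensor_shape.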
// Alignments
int constexpr AlignmentA = 8;
int constexpr AlignmentB = 8;
int constexpr AlignmentC = 8;
/// GEMM element types
using ElementInput = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Useful macros
#define CHECK_CUDA_CALL(call, handler) \
do { \
cudaError_t __err = (call); \
if (__err != cudaSuccess) { \
std::cerr << #call " failed: " << cudaGetErrorString(__err) << std::endl; \
handler; \
} \
} while(0)
#define CHECK_CUTLASS_CALL(call, handler) \
do { \
cutlass::Status __status = (call); \
if (__status != cutlass::Status::kSuccess) { \
std::cerr << #call " failed: " << cutlass::cutlassGetStatusString(__status) << std::endl; \
handler; \
} \
} while(0)
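// Example usage (as in the code below):
// CHECK_CUDA_CALL(cudaDeviceSynchronize(), return false);
// CHECK_CUTLASS_CALL(gemm_normal.initialize(arguments, nullptr), return false);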
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
cutlass::gemm::GemmCoord problem_each;
int batch_count;
int iterations;
int cuda_streams;
bool verbose;
float alpha;
float beta;
//
// Methods
//
Options():
help(false),
error(false),
reference_check(true),
batch_count(-1),
iterations(20),
cuda_streams(0),
verbose(false),
alpha(1),
beta()
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alpha", alpha, 1.0f);
cmd.get_cmd_line_argument("beta", beta, 0.0f);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("streams", cuda_streams, 0);
cmd.get_cmd_line_argument("verbose", verbose, false);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
int m, n, k;
cmd.get_cmd_line_argument("m", m, 384);
cmd.get_cmd_line_argument("n", n, 192);
cmd.get_cmd_line_argument("k", k, 384);
cmd.get_cmd_line_argument("batch-count", batch_count, 96);
problem_each = cutlass::gemm::GemmCoord(m, n, k);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out <<
"39_gemm_permute\n"
"\n"
" This example tests and profiles the performance of normal GEMM and batched GEMM with different"
" combinations of fused permutations of input and output tensors."
"\n"
" Permutations considered in this example:\n"
"\n"
" Normal GEMM:\n"
" 1) Tensor4DPermute0213: matrix of shape [X, Y] is reshaped as [X/S1, S1, S2, Y/S2] and has its dimensions"
" permuted as [0, 2, 1, 3], resulting in shape [X/S1, S2, S1, Y/S2] viewed as matrix of shape [X*S2/S1, Y*S1/S2].\n"
" 2) Tensor5DPermute20314: matrix of shape [X, Y] is reshaped as [X/T1, T1, T2, T3, Y/T2/T3] and has its dimensions"
" permuted as [2, 0, 3, 1, 4], resulting in shape [T2, X/T1, T3, T1, Y/T2/T3] viewed as matrix of shape [X*T2/T1, Y*T1/T2].\n"
"\n"
" Batched GEMM:\n"
" 3) Tensor4DPermuteBMM0213: batched tensor of 3D shape [B, X, Y] is reshaped as 4D shape [B/D1, D1, X, Y]"
" and has its dimensions permuted as [0, 2, 1, 3], resulting in shape [B/D1, X, D1, Y] viewed as"
" a matrix of shape [B/D1, X, Y*D1] for batched GEMM purposes.\n"
"\n"
" Note: S1, S2, D1, D2, T1, T2, T3 are compile-time constants defined in gemm_permute.cu."
" Runtime specification of these values is not supported."
" These values along with alignment requirements place constraints on supported matrix sizes.\n"
"\n"
" Note: X, Y above may refer to M, N or K dimensions of GEMM problem, depending on the tensor considered (A, B or D)."
" For the output tensor D the values correspond directly to dimensions of D, whereas for A and B the original dimensions"
" X', Y' are inferred from the ones supplied to the GEMM, taking into account the permute operation.\n"
"\n"
"Options:\n"
"\n"
" --help If specified, displays this usage statement.\n\n"
" --batch-count=<int> Sets the number of batches in batched GEMM (batch number for BMM). (default: --batch-count=768)\n"
" --m=<int> Sets the M dimension for both batched GEMM and normal GEMM problems. (default: --m=128)\n"
" --n=<int> Sets the N dimension for both batched GEMM and normal GEMM problems. (default: --n=192)\n"
" --k=<int> Sets the K dimension for both batched GEMM and normal GEMM problems. (default: --k=384)\n"
" --alpha=<f32> Epilogue scalar alpha (real part)\n"
" --beta=<f32> Epilogue scalar beta (real part)\n\n"
" --iterations=<int> Number of profiling iterations to perform.\n"
" --reference-check=<bool> If true, performs reference check.\n"
" --verbose=<bool> If true, prints problem sizes and batching structure.\n"
"\n"
"Examples:\n"
"\n"
"# Runs a batched GEMM with 96 batches\n"
"$ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96\n"
"\n"
"# Runs a batched GEMM with 96 batches (with GEMM-K dimension equal to 1024)\n"
"$ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96 --k=1024 --verbose=true\n"
"\n"
"# Execute batched GEMM and profile with NSight\n"
"$ nv-nsight-cu-cli ./examples/39_gemm_permute/39_gemm_permute --m=256 --n=192 --k=256 --verbose=true --iterations=1 --reference-check=false\n"
"\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s, bool batched) const {
// Number of real-valued multiply-adds
int64_t fmas = int64_t();
fmas += problem_each.product() * (batched ? batch_count : 1);
// Two flops per multiply-add
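// Illustrative check with the defaults (m=384, n=192, k=384, batch-count=96, batched case):
// fmas = 384*192*384*96 ~ 2.72e9, so an average runtime of 1 ms corresponds to
// roughly 2 * 2.72e9 / 1e9 / 1e-3 ~ 5436 GFLOP/s.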
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace { // (anonymous)
/// Dimension-generic permutation loop
template<int I, typename Element, typename Layout, typename PermuteOp, typename Coord>
void permute_host_impl(
cutlass::TensorView<Element const, Layout> const & input,
cutlass::TensorView<Element, Layout> const & output,
PermuteOp && permute,
Coord & coord
) {
static_assert(Layout::kRank == Coord::kRank, "Incompatible Layout and Coord types");
if constexpr (I == Coord::kRank) {
output.at(permute(coord)) = input.at(coord);
}
else {
for (coord[I] = 0; coord[I] < input.extent(I); ++coord[I]) {
permute_host_impl<I+1>(input, output, std::forward<PermuteOp>(permute), coord);
}
}
}
} // namespace (anonymous)
/// Perform a reference (host-based) permutation of an input tensor
template<typename PermuteLayout, typename Element, typename Layout>
void permute_host(
cutlass::TensorView<Element const, Layout> const &input,
cutlass::TensorView<Element, Layout> const &output,
int batch_count) {
Layout layout = input.layout();
cutlass::MatrixCoord extent = input.extent();
std::size_t num_elems = layout.capacity(extent) * batch_count;
std::vector<Element> h_input(num_elems);
cutlass::device_memory::copy_to_host(h_input.data(), input.data(), num_elems);
std::vector<Element> h_output(num_elems);
using Info = PermuteInfo<PermuteLayout>;
using TensorLayout = typename Info::Layout;
auto shape_orig = Info::original_shape(extent, batch_count);
auto shape_perm = Info::permute(shape_orig);
cutlass::TensorView<Element const, TensorLayout> view_input(h_input.data(), TensorLayout::packed(shape_orig), shape_orig);
cutlass::TensorView<Element, TensorLayout> view_output(h_output.data(), TensorLayout::packed(shape_perm), shape_perm);
decltype(shape_orig) coord;
permute_host_impl<0>(view_input, view_output, Info::permute, coord);
cutlass::device_memory::copy_to_device(output.data(), h_output.data(), num_elems);
}
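// Example invocation (mirroring Testbed::validate below):
// permute_host<typename Gemm::PermuteALayout>(view_A.const_view(), view_A_perm, batch_count);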
template<typename Layout>
struct LayoutInfo;
template<>
struct LayoutInfo<cutlass::layout::RowMajor> {
static std::string name() { return "RowMajor"; }
};
template<>
struct LayoutInfo<cutlass::layout::ColumnMajor> {
static std::string name() { return "ColumnMajor"; }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ElementA, typename ElementB, typename ElementC>
class Testbed {
private:
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint32_t seed;
cutlass::DeviceAllocation<ElementA> block_A;
cutlass::DeviceAllocation<ElementB> block_B;
cutlass::DeviceAllocation<ElementC> block_C;
cutlass::DeviceAllocation<ElementC> block_D;
public:
//
// Methods
//
Testbed(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3090
):
options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
private:
/// Print permutation info for one tensor
template<typename PermuteLayout>
void print_tensor_info(
std::ostream & os,
std::string const &tensor_name,
int row_dim,
int col_dim) {
cutlass::MatrixCoord extent(options.problem_each.at(row_dim), options.problem_each.at(col_dim));
using Info = PermuteInfo<PermuteLayout>;
os << "tensor " << tensor_name << ": " << Info::desc() << "\n";
os << " extent: [" << extent.row() << ", " << extent.column() << "]";
if (Info::kBatched) {
os << ", batch count: " << options.batch_count;
}
os << "\n";
if (!cutlass::layout::is_trivial_permute<PermuteLayout>) {
auto shape_orig = Info::original_shape(extent, options.batch_count);
auto shape_perm = Info::permute(shape_orig);
os << " original: [" << shape_orig << "]\n";
os << " permuted: [" << shape_perm << "]\n";
}
}
/// Check shape compatibility for one tensor
template<typename Layout, typename PermuteLayout, int Alignment>
bool check_tensor_shape(
std::string const &tensor_name,
int row_dim,
int col_dim) {
cutlass::MatrixCoord extent(options.problem_each.at(row_dim), options.problem_each.at(col_dim));
using Info = PermuteInfo<PermuteLayout>;
auto rowAlign = cutlass::platform::is_same<Layout, cutlass::layout::ColumnMajor>::value ? Alignment : 1;
auto colAlign = cutlass::platform::is_same<Layout, cutlass::layout::RowMajor>::value ? Alignment : 1;
auto rowFactor = Info::kRowFactor * rowAlign;
auto colFactor = Info::kColumnFactor * colAlign;
// Check the divisibility constraints imposed by the permute op and the alignment requirement
bool const valid_row = extent.row() % rowFactor == 0;
if (!valid_row) {
std::cerr << "Invalid tensor " << tensor_name << " row size = " << extent.row() << ", "
"must be divisible by " << rowFactor << ", "
"required by " << Info::name() <<
(rowAlign > 1 ? (" and alignment of " + std::to_string(rowAlign)) : "") << std::endl;
}
bool const valid_col = extent.column() % colFactor == 0;
if (!valid_col) {
std::cerr << "Invalid tensor " << tensor_name << " column size = " << extent.column() << ", "
"must be divisible by " << colFactor << ", "
"required by " << Info::name() <<
(colAlign > 1 ? (" and alignment of " + std::to_string(colAlign)) : "") << std::endl;
}
bool const valid_bsz = options.batch_count % Info::kBatchFactor == 0;
if (!valid_bsz) {
std::cerr << "Invalid batch count = " << options.batch_count << ", "
"must be divisible by " << Info::kBatchFactor << ", "
"required by " << Info::name() << std::endl;
}
return valid_row && valid_col && valid_bsz;
}
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor_(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Initializes data structures
void initialize(int batch_count) {
srand(seed);
int64_t total_elements_A = options.problem_each.m() * options.problem_each.k() * batch_count;
int64_t total_elements_B = options.problem_each.n() * options.problem_each.k() * batch_count;
int64_t total_elements_C = options.problem_each.m() * options.problem_each.n() * batch_count;
int64_t total_elements_D = options.problem_each.m() * options.problem_each.n() * batch_count;
// Allocate space
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
// Initialize input tensors
initialize_tensor_(block_A.get(), total_elements_A, init_A, seed * 2021);
initialize_tensor_(block_B.get(), total_elements_B, init_B, seed * 2022);
initialize_tensor_(block_C.get(), total_elements_C, init_C, seed * 2023);
cutlass::reference::device::BlockFillSequential(
block_D.get(), total_elements_D, ElementC(), ElementC());
}
/// Check device GEMM results against a reference implementation with separate host-based permutation
template<typename Gemm>
bool validate(Gemm const &gemm) {
bool constexpr kBatched = PermuteInfo<typename Gemm::PermuteALayout>::kBatched
|| PermuteInfo<typename Gemm::PermuteBLayout>::kBatched
|| PermuteInfo<typename Gemm::PermuteDLayout>::kBatched;
int const batch_count = kBatched ? options.batch_count : 1;
cutlass::gemm::GemmCoord problem = options.problem_each;
cutlass::MatrixCoord extent_A{problem.m(), problem.k()};
cutlass::MatrixCoord extent_B{problem.k(), problem.n()};
cutlass::MatrixCoord extent_C{problem.m(), problem.n()};
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
LayoutA layout_A(LayoutA::packed(extent_A));
LayoutB layout_B(LayoutB::packed(extent_B));
LayoutC layout_C(LayoutC::packed(extent_C));
auto size_A = layout_A.capacity(extent_A) * batch_count;
auto size_B = layout_B.capacity(extent_B) * batch_count;
auto size_C = layout_C.capacity(extent_C) * batch_count;
cutlass::TensorView<ElementA, LayoutA> view_A(block_A.get(), layout_A, extent_A);
cutlass::TensorView<ElementB, LayoutB> view_B(block_B.get(), layout_B, extent_B);
cutlass::TensorView<ElementC, LayoutC> view_C(block_C.get(), layout_C, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_D(block_D.get(), layout_C, extent_C);
cutlass::DeviceAllocation<ElementA> block_A_perm(size_A);
cutlass::DeviceAllocation<ElementA> block_B_perm(size_B);
cutlass::TensorView<ElementA, LayoutA> view_A_perm(block_A_perm.get(), layout_A, extent_A);
cutlass::TensorView<ElementB, LayoutB> view_B_perm(block_B_perm.get(), layout_B, extent_B);
permute_host<typename Gemm::PermuteALayout>(view_A.const_view(), view_A_perm, batch_count);
permute_host<typename Gemm::PermuteBLayout>(view_B.const_view(), view_B_perm, batch_count);
cutlass::DeviceAllocation<ElementC> block_D_ref(size_C);
cutlass::TensorView<ElementC, LayoutC> view_D_ref(block_D_ref.get(), layout_C, extent_C);
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
typename EpilogueOutputOp::ElementCompute,
typename Gemm::ElementAccumulator
>(
problem,
options.alpha,
view_A_perm,
Gemm::kTransformA,
view_B_perm,
Gemm::kTransformB,
options.beta,
view_C,
view_D_ref,
ElementAccumulator(0),
batch_count,
options.problem_each.m() * options.problem_each.k(),
options.problem_each.n() * options.problem_each.k(),
options.problem_each.m() * options.problem_each.n(),
options.problem_each.m() * options.problem_each.n()
);
cutlass::DeviceAllocation<ElementC> block_D_perm(size_C);
cutlass::TensorView<ElementC, LayoutC> view_D_perm(block_D_perm.get(), layout_C, extent_C);
permute_host<typename Gemm::PermuteDLayout>(view_D_ref.const_view(), view_D_perm, batch_count);
// Reference check
return cutlass::reference::device::BlockCompareEqual(view_D_perm.data(), view_D.data(), size_C);
}
public:
template<typename Gemm>
bool profile_GEMM_permute() {
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
using PermuteALayout = typename Gemm::PermuteALayout;
using PermuteBLayout = typename Gemm::PermuteBLayout;
using PermuteDLayout = typename Gemm::PermuteDLayout;
bool constexpr kBatched = PermuteInfo<PermuteALayout>::kBatched
|| PermuteInfo<PermuteBLayout>::kBatched
|| PermuteInfo<PermuteDLayout>::kBatched;
std::cout << "\n"
"====================================================\n"
<< (kBatched ? "Batched" : "Normal") << " GEMM:"
<< "\n A=" << LayoutInfo<LayoutA>::name() << "," << PermuteInfo<PermuteALayout>::name()
<< "\n B=" << LayoutInfo<LayoutB>::name() << "," << PermuteInfo<PermuteBLayout>::name()
<< "\n D=" << LayoutInfo<LayoutC>::name() << "," << PermuteInfo<PermuteDLayout>::name()
<< "\n"
"====================================================\n";
if (options.verbose) {
print_tensor_info<PermuteALayout>(std::cout, "A", 0, 2);
print_tensor_info<PermuteBLayout>(std::cout, "B", 2, 1);
print_tensor_info<PermuteDLayout>(std::cout, "D", 0, 1);
}
std::cout << std::endl;
bool valid = true;
valid &= check_tensor_shape<LayoutA, PermuteALayout, Gemm::kAlignmentA>("A", 0, 2);
valid &= check_tensor_shape<LayoutB, PermuteBLayout, Gemm::kAlignmentB>("B", 2, 1);
valid &= check_tensor_shape<LayoutC, PermuteDLayout, Gemm::kAlignmentC>("D", 0, 1);
if (!valid)
{
std::cout << "Skipped test" << std::endl;
return true;
}
int const batch_count = kBatched ? options.batch_count : 1;
// Initialize the problem
initialize(batch_count);
// Configure the GEMM arguments
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
typename EpilogueOutputOp::Params epilogue_op(options.alpha, options.beta);
// Please make sure all problem_sizes are the same for kBatched mode
auto problem = options.problem_each;
cutlass::MatrixCoord extent_A{problem.m(), problem.k()};
cutlass::MatrixCoord extent_B{problem.k(), problem.n()};
cutlass::MatrixCoord extent_C{problem.m(), problem.n()};
LayoutA layout_A(LayoutA::packed(extent_A));
LayoutB layout_B(LayoutB::packed(extent_B));
LayoutC layout_C(LayoutC::packed(extent_C));
// Configure GEMM arguments
typename Gemm::Arguments arguments{
kBatched ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
epilogue_op,
(void*)block_A.get(),
(void*)block_B.get(),
(void*)block_C.get(),
(void*)block_D.get(),
// For any non-trivial permute the batch stride must be set to 0
cutlass::layout::is_trivial_permute<PermuteALayout> ? layout_A.capacity(extent_A) : 0,
cutlass::layout::is_trivial_permute<PermuteBLayout> ? layout_B.capacity(extent_B) : 0,
layout_C.capacity(extent_C),
cutlass::layout::is_trivial_permute<PermuteDLayout> ? layout_C.capacity(extent_C) : 0,
layout_A.stride(0),
layout_B.stride(0),
layout_C.stride(0),
layout_C.stride(0),
};
// Initialize the GEMM object
Gemm gemm_normal;
CHECK_CUTLASS_CALL(gemm_normal.initialize(arguments, nullptr), return false);
// Run the normal GEMM object
CHECK_CUTLASS_CALL(gemm_normal.run(), return false);
// Wait for completion
CHECK_CUDA_CALL(cudaDeviceSynchronize(), return false);
//
// Verify correctness
//
if (options.reference_check) {
if (validate(gemm_normal)) {
std::cout << "\nPassed verification\n" << std::endl;
}
else {
std::cerr << "\n*** Error - problem failed the QA check ***\n" << std::endl;
return false;
}
}
// Warm-up run of the normal GEMM object
CHECK_CUTLASS_CALL(gemm_normal.run(), return false);
// Construct events
cudaEvent_t events[2];
for (auto & event : events) {
CHECK_CUDA_CALL(cudaEventCreate(&event), return false);
}
// Record an event at the start of a series of GEMM operations
CHECK_CUDA_CALL(cudaEventRecord(events[0]), return false);
// Run profiling loop
for (int iter = 0; iter < options.iterations; ++iter) {
gemm_normal();
}
// Record an event when the GEMM operations have been launched.
CHECK_CUDA_CALL(cudaEventRecord(events[1]), return false);
// Wait for work on the device to complete.
CHECK_CUDA_CALL(cudaEventSynchronize(events[1]), return false);
// Measure elapsed runtime
float runtime_total_ms = 0;
CHECK_CUDA_CALL(cudaEventElapsedTime(&runtime_total_ms, events[0], events[1]), return false);
// Compute average runtime and GFLOPs.
double runtime_avg_ms = double(runtime_total_ms) / double(options.iterations);
double gflops = options.gflops(runtime_avg_ms / 1000.0, kBatched);
// Cleanup
for (auto event : events) {
CHECK_CUDA_CALL(cudaEventDestroy(event), return false);
}
std::cout << " Runtime: " << runtime_avg_ms << " ms\n"
" GFLOPs: " << gflops << std::endl;
return true;
}
};
/// Shorthand alias for GEMM instantiations
template<typename LayoutA, typename PermuteALayout,
typename LayoutB, typename PermuteBLayout,
typename LayoutC, typename PermuteDLayout>
using GemmPermute = cutlass::gemm::device::GemmUniversal<
ElementInput, LayoutA,
ElementInput, LayoutB,
ElementOutput, LayoutC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
ElementOutput,
AlignmentC, //128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
4, /*kStages*/
AlignmentA, /*AlignmentA*/
AlignmentB, /*AlignmentB*/
cutlass::arch::OpMultiplyAdd,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kNone,
false, /*GatherA*/
false, /*GatherB*/
false, /*ScatterD*/
PermuteDLayout, /*PermuteDLayout*/
typename cutlass::layout::InversePermute<PermuteALayout>::type, /*PermuteALayout*/
typename cutlass::layout::InversePermute<PermuteBLayout>::type /*PermuteBLayout*/
>;
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
cudaDeviceProp props;
CHECK_CUDA_CALL(cudaGetDeviceProperties(&props, 0), return EXIT_FAILURE);
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout << "CUTLASS's GEMM+Permute example requires a GPU of NVIDIA's Ampere Architecture "
"or later (compute capability 80 or greater).\n";
return EXIT_SUCCESS;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return EXIT_SUCCESS;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return EXIT_FAILURE;
}
//
// Define GEMM types to test
//
//
// TTT (Row-major) GEMMs
//
using TTTGemmNormalPermuteNone = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using TTTGemmNormalPermuteA = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using TTTGemmNormalPermuteAD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
using TTTGemmNormalPermuteB = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using TTTGemmNormalPermuteBD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
using TTTGemmNormalPermuteD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
using TTTGemmNormalPermuteAB = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using TTTGemmNormalPermuteABD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
//
// NNN (Col-major) GEMMs
//
using NNNGemmNormalPermuteNone = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmNormalPermuteA = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmNormalPermuteAD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
using NNNGemmNormalPermuteB = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmNormalPermuteBD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
using NNNGemmNormalPermuteD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
using NNNGemmNormalPermuteAB = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmNormalPermuteABD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
//
// NNT (Col-major inputs, row-major output) GEMMs
//
using NNTGemmNormalPermuteNone = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using NNTGemmNormalPermuteA = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using NNTGemmNormalPermuteAD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
using NNTGemmNormalPermuteB = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using NNTGemmNormalPermuteBD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
using NNTGemmNormalPermuteD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
using NNTGemmNormalPermuteAB = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using NNTGemmNormalPermuteABD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>
>;
//
// TTN (Row-major inputs, col-major output) GEMMs
//
using TTNGemmNormalPermuteNone = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using TTNGemmNormalPermuteA = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using TTNGemmNormalPermuteAD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
using TTNGemmNormalPermuteB = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using TTNGemmNormalPermuteBD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
using TTNGemmNormalPermuteD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
using TTNGemmNormalPermuteAB = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using TTNGemmNormalPermuteABD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>
>;
//
// TTT (Row-major) BMMs
//
using TTTGemmBatchedPermuteA = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using TTTGemmBatchedPermuteAD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>
>;
using TTTGemmBatchedPermuteB = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::NoPermute
>;
using TTTGemmBatchedPermuteBD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>
>;
using TTTGemmBatchedPermuteD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>
>;
using TTTGemmBatchedPermuteAB = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::NoPermute,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>
>;
using TTTGemmBatchedPermuteABD = GemmPermute<
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>,
cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>
>;
//
// NNN (Col-major) BMMs
//
using NNNGemmBatchedPermuteA = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmBatchedPermuteAD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>
>;
using NNNGemmBatchedPermuteB = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmBatchedPermuteBD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>
>;
using NNNGemmBatchedPermuteD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>
>;
using NNNGemmBatchedPermuteAB = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute
>;
using NNNGemmBatchedPermuteABD = GemmPermute<
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>,
cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>
>;
//
// Profile it
//
Testbed<ElementInput, ElementInput, ElementOutput> testbed(options);
bool result = true;
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteNone>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteA>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteAD>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteB>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteBD>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteD>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteAB>();
result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteABD>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteNone>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteA>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteAD>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteB>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteBD>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteD>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteAB>();
result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteABD>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteNone>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteA>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteAD>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteB>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteBD>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteD>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteAB>();
result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteABD>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteNone>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteA>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteAD>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteB>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteBD>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteD>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteAB>();
result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteABD>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteA>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteAD>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteB>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteBD>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteD>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteAB>();
result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteABD>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteA>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteAD>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteB>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteBD>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteD>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteAB>();
result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteABD>();
std::cout << "\n"
"====================================================\n"
"Finished (" << (result ? "PASS" : "FAIL") << ")\n"
"====================================================" << std::endl;
return result ? EXIT_SUCCESS : EXIT_FAILURE;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/39_gemm_permute/gemm_permute.cu/0 | {
"file_path": "examples/39_gemm_permute/gemm_permute.cu",
"repo_id": "examples",
"token_count": 19068
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory
to match canonical tensor layouts in global memory. Epilogues support
conversion and reduction operations.
This is a copy of cutlass/epilogue/threadblock/epilogue.h that can
handle "row_id" as a first argument, as uses it to get the corresponding
`m_prime` / `s_prime` to rescale the output.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "epilogue_pipelined.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
// output <- alpha * accumulator + beta * source
// with:
// alpha = 1 / s_prime (to normalize when isLast=True, 1 otherwise)
// beta = alpha / m_prime (renormalize the output when the max changes)
// source is the current output
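// Scalar sketch of the per-row rescaling (illustrative only):
//   out = isLast ? (accum + m_prime[row] * source) / s_prime[row]
//                : accum + m_prime[row] * source
// which is exactly D = alpha * accum + beta * source with alpha and beta as above.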
template <
typename ElementOutput_, ///< Data type used to store tensors
typename ElementSource_, ///< Data type for source (usually matches
//`ElementOutput`)
int Count, ///< Number of elements computed per operation.
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data
///< to store
typename ElementAccumulator_, ///< Accumulator data type
typename ElementCompute_, ///< Data type used to compute linear combination
bool isFirst,
bool isLast,
typename FragmentAlphaBeta_,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
class MemoryEfficientAttentionNormalize {
public:
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
using FragmentAlphaBeta = FragmentAlphaBeta_;
static FloatRoundStyle const kRound = Round;
private:
//
// Data members
//
FragmentAlphaBeta const& s_prime_;
FragmentAlphaBeta const& m_prime_;
public:
/// Constructs the function object, possibly loading from pointers in host
/// memory
CUTLASS_HOST_DEVICE
MemoryEfficientAttentionNormalize(
FragmentAlphaBeta const& s_prime,
FragmentAlphaBeta const& m_prime)
: s_prime_(s_prime), m_prime_(m_prime) {}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return !isFirst;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
int row,
FragmentAccumulator const& accumulator,
FragmentSource const& source) const {
assert(!isFirst);
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round>
source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
ElementCompute beta = alpha * m_prime_[row];
intermediate = mul_add_source(beta, converted_source); // X = beta * C
intermediate = mul_add_accumulator(
alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(int row, FragmentAccumulator const& accumulator)
const {
assert(isFirst);
// Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
intermediate = mul_accumulator(
alpha, converted_accumulator); // X = alpha * Accum
return destination_converter(intermediate);
}
};
} // namespace thread
namespace threadblock {
template <
typename EO,
typename ES,
int Count,
typename EA,
typename EC,
bool F,
bool L,
typename FAB,
FloatRoundStyle R>
struct ApplyEpilogueOp<thread::MemoryEfficientAttentionNormalize<
EO,
ES,
Count,
EA,
EC,
F,
L,
FAB,
R>> {
using Op = thread::
MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>;
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
Op const& output_op,
int row_id,
typename Op::FragmentAccumulator const& accum,
typename Op::FragmentSource const& source) {
return output_op(row_id, accum, source);
}
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
Op const& output_op,
int row_id,
typename Op::FragmentAccumulator const& accum) {
return output_op(row_id, accum);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/epilogue/epilogue_rescale_output.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/epilogue/epilogue_rescale_output.h",
"repo_id": "examples",
"token_count": 2907
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Instantiates the right WarpIterator to read from shared memory
The class `DefaultWarpIteratorAFromSharedMemory` is useful when reading
data dumped with `B2bGemm::accumToSmem`.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h"
#include "cutlass/platform/platform.h"
#include "warp_iterator_from_smem.h"
namespace cutlass {
namespace gemm {
namespace threadblock {
template <
typename WarpShape,
typename InstructionShape,
typename RegularWarpIterator,
typename Policy,
typename Enable = void>
struct DefaultWarpIteratorAFromSharedMemory {};
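// Illustrative usage (type names are placeholders, not defined in this header):
// given a warp-level iterator `RegularWarpIterator` over 16-bit elements and an
// MMA `Policy` with OpDelta::kRow == 1, the Ampere fp16 specialization below is
// selected by
//   using WarpIterator = typename DefaultWarpIteratorAFromSharedMemory<
//       cutlass::gemm::GemmShape<32, 32, 32>,   // WarpShape
//       cutlass::gemm::GemmShape<16, 8, 16>,    // InstructionShape
//       RegularWarpIterator, Policy>::WarpIterator;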
// TensorOp - Ampere half
template <typename RegularWarpIterator, typename Policy, int kInstrK>
struct DefaultWarpIteratorAFromSharedMemory<
cutlass::gemm::GemmShape<32, 32, 32>,
cutlass::gemm::GemmShape<16, 8, kInstrK>,
RegularWarpIterator,
Policy,
typename platform::enable_if<(
sizeof_bits<typename RegularWarpIterator::Element>::value == 16 &&
Policy::Operator::Policy::OpDelta::kRow == 1)>::type> {
using OpDelta = typename Policy::Operator::Policy::OpDelta;
using WarpShape = cutlass::MatrixShape<32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, kInstrK>;
using WarpIterator = cutlass::gemm::warp::WarpIteratorFromSmem<
cutlass::gemm::Operand::kA,
typename RegularWarpIterator::Element,
cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>>;
};
// TensorOp - Ampere f32
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
WarpShape,
cutlass::gemm::GemmShape<16, 8, 8>,
RegularWarpIterator,
Policy,
typename platform::enable_if<(
sizeof_bits<typename RegularWarpIterator::Element>::value != 16 ||
Policy::Operator::Policy::OpDelta::kRow != 1)>::type> {
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
static constexpr auto kWarpSize = 32;
using OpDelta = typename Policy::Operator::Policy::OpDelta;
using WarpIterator =
cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
cutlass::MatrixShape<WarpShape::kM, WarpShape::kK>,
cutlass::gemm::Operand::kA,
typename RegularWarpIterator::Element,
cutlass::layout::RowMajor,
cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>,
OpDelta::kRow,
kWarpSize>;
};
// TensorOp - Volta
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
WarpShape,
cutlass::gemm::GemmShape<16, 16, 4>,
RegularWarpIterator,
Policy> {
using InstructionShape = cutlass::gemm::GemmShape<16, 16, 4>;
static constexpr auto kWarpSize = 32;
using OpDelta = typename Policy::Operator::Policy::OpDelta;
using WarpIterator =
cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator<
cutlass::MatrixShape<32, 32>, // MatrixShape<WarpShape::kM,
// WarpShape::kK>,
cutlass::gemm::Operand::kA,
typename RegularWarpIterator::Element,
cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>,
cutlass::MatrixShape<16, 4>,
OpDelta::kRow,
kWarpSize>;
};
// Simt
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
WarpShape,
cutlass::gemm::GemmShape<1, 1, 1>,
RegularWarpIterator,
Policy> {
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr auto kWarpSize = 32;
// We just use the same iterator, as we reproduced the same shared-memory
// schema. Just modify it to handle non-complete tiles.
using WarpIterator = RegularWarpIterator;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| examples/41_fused_multi_head_attention/iterators/default_warp_iterator_from_smem.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/iterators/default_warp_iterator_from_smem.h",
"repo_id": "examples",
"token_count": 1962
} | 10 |
{
"0": {
"A_tp": "fp16", "B_tp": "fp16", "C_tp": "fp16", "Acc_tp": "fp16",
"A_format": "Row", "B_format": "Col", "C_format": "Row",
"mnk": [15000, 256, 32],
"epilogue": {
"tp": "LeakyRelu",
"bias": {"addbias": false, "bias_tp": "mat"},
"args": [["float", "leaky_alpha", 1.3]]
}
},
"1": {
"A_tp": "fp16", "B_tp": "fp16", "C_tp": "fp16", "Acc_tp": "fp16",
"A_format": "Row", "B_format": "Col", "C_format": "Row",
"mnk": [15000, 128, 256],
"epilogue": {
"tp": "LeakyRelu",
"bias": {"addbias": false, "bias_tp": "mat"},
"args": [["float", "leaky_alpha", 1.3]]
}
},
"2": {
"A_tp": "fp16", "B_tp": "fp16", "C_tp": "fp16", "Acc_tp": "fp16",
"A_format": "Row", "B_format": "Col", "C_format": "Row",
"mnk": [15000, 64, 128],
"epilogue": {
"tp": "LeakyRelu",
"bias": {"addbias": false, "bias_tp": "mat"},
"args": [["float", "leaky_alpha", 1.3]]
}
}
}
| examples/44_multi_gemm_ir_and_codegen/config.json/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/config.json",
"repo_id": "examples",
"token_count": 658
} | 11 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
import gen_turing_and_volta as gen_basic
class gen_verify:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.name = gen_class_name + "_verify"
self.b2b_num = len(fuse_gemm_info)
self.params = []
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.separate_cutlass = gen_basic.gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_params()
self.output_dir = output_dir
def gen_code(self):
code = ""
code += self.user_header_file
code += self.separate_cutlass.gen_using(False) #False -> Turing, True -> Volta
code_body = ""
for i in range(self.b2b_num):
code_body += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_body += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(Arguments_", i) + ", nullptr);\n"
code_body += self.separate_cutlass.gen_run()
code += ir.gen_func(self.name, self.params, code_body)
helper.write_2_headfile("cutlass_verify.h", self.output_dir, code)
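    # Illustrative end-to-end usage of this generator (arguments are placeholders):
    #   gen = gen_verify(fuse_gemm_info, "b2b_gemm", ["user_kernel.h"], output_dir="./")
    #   gen.gen_code()  # writes cutlass_verify.h into output_dir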
def gen_params(self):
for i in range(self.b2b_num):
self.params.append(
(
helper.var_idx("typename Gemm", i)+ "::Arguments",
helper.var_idx("Arguments_", i)
)
)
    def get_params(self, declaration = True):
        code = ""
        if declaration:
for param in self.params:
code += param[0] + " " + param[1] + ";\n"
return code
    def gen_initialize(self):
code = ""
initialize_code = self.separate_cutlass.gen_initialize()
code = ir.gen_func("initialize", [[]])
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_verify.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_verify.py",
"repo_id": "examples",
"token_count": 1431
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "dual_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B0 operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B0 operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Iterates over tiles of B1 operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B1 operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// B1-specific version of the policy (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Used for partial specialization
typename Enable = bool>
class DualMmaMultistage :
public DualMmaBase<Shape_, Policy0_, Policy1_, Stages> {
public:
///< Base class
using Base = DualMmaBase<Shape_, Policy0_, Policy1_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B0 operand in global memory
using IteratorB0 = IteratorB0_;
///< Iterates over tiles of B1 operand in global memory
using IteratorB1 = IteratorB1_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB0 = SmemIteratorB0_;
using SmemIteratorB1 = SmemIteratorB1_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
using Operator1 = typename Policy1::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator0::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB0::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
    /// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
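    // For example (illustrative numbers): with AsyncCopyIterationsPerStageA = 8 and
    // Base::kWarpGemmIterations = 4, kAccessesPerGroupA = (8 + 4 - 1) / 4 = 2, i.e. the
    // per-stage cp.async copies are spread evenly across the warp-level MMA iterations.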
};
private:
using WarpLoadedFragmentA = typename Operator0::FragmentA;
using WarpLoadedFragmentB0 = typename Operator0::FragmentB;
using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
using WarpTransformedFragmentA = typename Operator0::TransformedFragmentA;
using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB;
using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DualMmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B0_(shared_storage.operand_B0_ref(), thread_idx),
smem_iterator_B1_(shared_storage.operand_B1_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
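    // For example (illustrative): with Base::WarpCount::kM = 2 and kN = 2, warp_idx = 5
    // maps to warp_idx_mn = 1, warp_idx_k = 1, warp_idx_m = 1, warp_idx_n = 0.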
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B0_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
this->warp_tile_iterator_B1_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB0 &iterator_B0, IteratorB1 &iterator_B1,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
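        // e.g. (illustrative): 16-bit elements, 8 elements per access, 1 access per
        // vector -> 16 * 8 / 1 / 8 = 16 bytes per cp.async instruction.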
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B0.set_iteration_index(group_start_B *
IteratorB0::kAccessesPerVector);
iterator_B1.set_iteration_index(group_start_B *
IteratorB1::kAccessesPerVector);
this->smem_iterator_B0_.set_iteration_index(group_start_B);
this->smem_iterator_B1_.set_iteration_index(group_start_B);
// Async Copy for operand B0
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B0.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B0.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B0.valid());
}
++iterator_B0;
}
++this->smem_iterator_B0_;
}
}
// Async Copy for operand B1
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B1.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B1.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B1.valid());
}
++iterator_B1;
}
++this->smem_iterator_B1_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum0,
FragmentC &accum1,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB0 iterator_B0,
IteratorB1 iterator_B1,
///< initial value of accumulator
FragmentC const &src_accum0,
FragmentC const &src_accum1
) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B0.clear_mask(gemm_k_iterations == 0);
iterator_B1.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
int src_bytes = (iterator_A.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B0.set_iteration_index(0);
iterator_B1.set_iteration_index(0);
this->smem_iterator_B0_.set_iteration_index(0);
this->smem_iterator_B1_.set_iteration_index(0);
// Async Copy for operand B0
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B0.get(), iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
// Async Copy for operand B1
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum0 = src_accum0;
accum1 = src_accum1;
//
// Clear the remaining tiles of SMEM. This is a functional requirement for some kernels
// so that all accumulator elements outside the GEMM footprint are zero.
//
if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
typename IteratorA::AccessType zero_A;
zero_A.clear();
last_smem_iterator_A.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
last_smem_iterator_A.get());
*dst_ptr = zero_A;
++last_smem_iterator_A;
}
typename IteratorB0::AccessType zero_B;
zero_B.clear();
/// Iterator to write threadblock-scoped tile of B0 operand to shared memory
SmemIteratorB0 last_smem_iterator_B0(this->smem_iterator_B0_);
last_smem_iterator_B0.set_iteration_index(0);
// Async Copy for operand B0
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
last_smem_iterator_B0.get());
*dst_ptr = zero_B;
++last_smem_iterator_B0;
}
/// Iterator to write threadblock-scoped tile of B1 operand to shared memory
SmemIteratorB1 last_smem_iterator_B1(this->smem_iterator_B1_);
last_smem_iterator_B1.set_iteration_index(0);
// Async Copy for operand B1
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
last_smem_iterator_B1.get());
*dst_ptr = zero_B;
++last_smem_iterator_B1;
}
}
// Waits until stages up to the previous (kStages-2)th stage have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB0 warp_loaded_frag_B0[2];
WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB0 warp_transformed_frag_B0[2];
WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
Operator0 warp_mma0;
Operator1 warp_mma1;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B0_.set_kgroup_index(0);
this->warp_tile_iterator_B1_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B0_;
++this->warp_tile_iterator_B1_;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B0.clear_mask(gemm_k_iterations == 0);
iterator_B1.clear_mask(gemm_k_iterations == 0);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma0.transform(warp_transformed_frag_A[0], warp_transformed_frag_B0[0],
warp_loaded_frag_A[0], warp_loaded_frag_B0[0]);
warp_mma1.transform(warp_transformed_frag_A[0], warp_transformed_frag_B1[0],
warp_loaded_frag_A[0], warp_loaded_frag_B1[0]);
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
// accumulator and this temporary accumulator is added to the final
// accumulator once in every mainloop iteration.
plus<FragmentC> plus_accum;
FragmentC tmp_accum0, tmp_accum1;
if (platform::is_same<typename Operator0::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator0::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
tmp_accum0.clear();
tmp_accum1.clear();
}
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B0_;
++this->warp_tile_iterator_B1_;
if (warp_mma_k > 0) {
warp_mma0.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B0[warp_mma_k % 2]);
warp_mma1.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B1[warp_mma_k % 2]);
}
if (platform::is_same<typename Operator0::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator0::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
warp_mma0(
tmp_accum0,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
tmp_accum0
);
warp_mma1(
tmp_accum1,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
tmp_accum1
);
if (warp_mma_k == 0) {
accum0 = plus_accum(accum0, tmp_accum0);
accum1 = plus_accum(accum1, tmp_accum1);
tmp_accum0.clear();
tmp_accum1.clear();
}
} else {
warp_mma0(
accum0,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
accum0
);
warp_mma1(
accum1,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
accum1
);
}
        // Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(iterator_A, iterator_B0, iterator_B1, group_start_iteration_A,
group_start_iteration_B);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(iterator_A, iterator_B0, iterator_B1, group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until stages up to the previous (kStages-2)th stage have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B0_.add_tile_offset(
{-Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations,
0});
this->warp_tile_iterator_B1_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B0.clear_mask(gemm_k_iterations == 0);
iterator_B1.clear_mask(gemm_k_iterations == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma0.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B0[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
warp_mma1.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
}
}
}
if (platform::is_same<typename Operator0::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator0::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
accum0 = plus_accum(accum0, tmp_accum0);
accum1 = plus_accum(accum1, tmp_accum1);
}
    // commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/threadblock/dual_mma_multistage.h/0 | {
"file_path": "examples/45_dual_gemm/threadblock/dual_mma_multistage.h",
"repo_id": "examples",
"token_count": 13243
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
template <class ProblemShape, class CtaTiler,
class TA, class AStride, class ASmemLayout, class AThreadLayout,
class TB, class BStride, class BSmemLayout, class BThreadLayout,
class TC, class CStride, class CSmemLayout, class CThreadLayout,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(CThreadLayout{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, AStride dA, ASmemLayout sA_layout, AThreadLayout tA,
TB const* B, BStride dB, BSmemLayout sB_layout, BThreadLayout tB,
TC * C, CStride dC, CSmemLayout , CThreadLayout tC,
Alpha alpha, Beta beta)
{
using namespace cute;
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
static_assert(is_static<AThreadLayout>::value);
static_assert(is_static<BThreadLayout>::value);
static_assert(is_static<CThreadLayout>::value);
CUTE_STATIC_ASSERT_V(size(tA) == size(tB)); // NumThreads
CUTE_STATIC_ASSERT_V(size(tC) == size(tA)); // NumThreads
CUTE_STATIC_ASSERT_V(size<0>(cta_tiler) % size<0>(tA) == Int<0>{}); // BLK_M / THR_M
CUTE_STATIC_ASSERT_V(size<2>(cta_tiler) % size<1>(tA) == Int<0>{}); // BLK_K / THR_K
CUTE_STATIC_ASSERT_V(size<1>(cta_tiler) % size<0>(tB) == Int<0>{}); // BLK_N / THR_N
CUTE_STATIC_ASSERT_V(size<2>(cta_tiler) % size<1>(tB) == Int<0>{}); // BLK_K / THR_K
CUTE_STATIC_ASSERT_V(size<0>(cta_tiler) % size<0>(tC) == Int<0>{}); // BLK_M / THR_M
CUTE_STATIC_ASSERT_V(size<1>(cta_tiler) % size<1>(tC) == Int<0>{}); // BLK_N / THR_N
static_assert(is_static<ASmemLayout>::value);
static_assert(is_static<BSmemLayout>::value);
static_assert(is_static<CSmemLayout>::value);
CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK
CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K)
Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K)
Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory buffers
__shared__ TA smemA[cosize_v<ASmemLayout>];
__shared__ TB smemB[cosize_v<BSmemLayout>];
Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K)
//
// Partition the copying of A and B tiles across the threads
//
// TUTORIAL: Example of simple raked partitioning of ThreadLayouts tA|tB over data A|B tiles
Tensor tAgA = local_partition(gA, tA, threadIdx.x); // (THR_M,THR_K,k)
Tensor tAsA = local_partition(sA, tA, threadIdx.x); // (THR_M,THR_K)
Tensor tBgB = local_partition(gB, tB, threadIdx.x); // (THR_N,THR_K,k)
Tensor tBsB = local_partition(sB, tB, threadIdx.x); // (THR_N,THR_K)
CUTE_STATIC_ASSERT_V(size<0>(tAgA) == size<0>(tAsA)); // THR_M
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // THR_K
CUTE_STATIC_ASSERT_V(size<0>(tBgB) == size<0>(tBsB)); // THR_N
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // THR_K
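  // With the tile sizes used in this example (BLK_M = BLK_N = 128, BLK_K = 8) and the
  // 32x8 thread layouts tA|tB, each thread owns a (4,1) slice of the A and B tiles per
  // k-tile: THR_M = 128/32 = 4, THR_K = 8/8 = 1 (and likewise THR_N = 4).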
//
// Define A/B partitioning and C accumulators
//
// TUTORIAL: Example of partitioning via projections of a ThreadLayout tC
// Partition sA (M,K) by the rows of tC
Tensor tCsA = local_partition(sA, tC, threadIdx.x, Step<_1, X>{}); // (THR_M,BLK_K)
// Partition sB (N,K) by the cols of tC
Tensor tCsB = local_partition(sB, tC, threadIdx.x, Step< X,_1>{}); // (THR_N,BLK_K)
// Partition gC (M,N) by the tile of tC
Tensor tCgC = local_partition(gC, tC, threadIdx.x, Step<_1,_1>{}); // (THR_M,THR_N)
// Allocate the accumulators -- same shape/layout as the partitioned data
Tensor tCrC = make_tensor_like(tCgC); // (THR_M,THR_N)
CUTE_STATIC_ASSERT_V(size<0>(tCrC) == size<0>(tCgC)); // THR_M
CUTE_STATIC_ASSERT_V(size<0>(tCrC) == size<0>(tCsA)); // THR_M
CUTE_STATIC_ASSERT_V(size<1>(tCrC) == size<1>(tCgC)); // THR_N
CUTE_STATIC_ASSERT_V(size<1>(tCrC) == size<0>(tCsB)); // THR_N
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCsB)); // BLK_K
// Clear the accumulators
clear(tCrC);
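  // With the 16x16 thread layout tC over the 128x128 CTA tile, tCgC and tCrC are
  // (8,8) per thread, i.e. 64 accumulator elements held in registers.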
#if 0
if(thread0()) {
print(" mA : "); print( mA); print("\n");
print(" gA : "); print( gA); print("\n");
print(" sA : "); print( sA); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mB : "); print( mB); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sB : "); print( sB); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tBsB : "); print(tBsB); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mC : "); print( mC); print("\n");
print(" gC : "); print( gC); print("\n");
print("tCsA : "); print(tCsA); print("\n");
print("tCsB : "); print(tCsB); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print("tCrC : "); print(tCrC); print("\n");
}
#endif
#if 1
  // TUTORIAL: Example of a simple mainloop that reads tiles of data into shared memory,
// and then computes on those tiles.
// copy(.) operates on the global and shared memory via the tA|tB partitioning
// gemm(.) operates on the shared and register memory via the tC partitioning
auto K_TILE_MAX = size<2>(tAgA);
for (int k_tile = 0; k_tile < K_TILE_MAX; ++k_tile)
{
// Copy gmem to smem with tA|tB thread-partitioned tensors
copy(tAgA(_,_,k_tile), tAsA); // A (THR_M,THR_K) -> (THR_M,THR_K)
copy(tBgB(_,_,k_tile), tBsB); // B (THR_N,THR_K) -> (THR_N,THR_K)
// TUTORIAL: The above call to copy(tAgA(_,_,k_tile), tAsA) is equivalent to
// Tensor tAgAk = tAgA(_,_,k_tile);
// CUTE_UNROLL
// for (int i = 0; i < size(tAsA); ++i) {
// tAsA(i) = tAgAk(i);
// }
cp_async_fence(); // Label the end of (potential) cp.async instructions
cp_async_wait<0>(); // Sync on all (potential) cp.async instructions
__syncthreads(); // Wait for all threads to write to smem
// Compute gemm on tC thread-partitioned smem
gemm(tCsA, tCsB, tCrC); // (THR_M,THR_N) += (THR_M,BLK_K) * (THR_N,BLK_K)
// TUTORIAL: The above call to gemm(tCsA, tCsB, tCrC) is equivalent to
// CUTE_UNROLL
// for (int k = 0; k < size<1>(tCsA); ++k) {
// CUTE_UNROLL
// for (int m = 0; m < size<0>(tCrC); ++m) {
// CUTE_UNROLL
// for (int n = 0; n < size<1>(tCrC); ++n) {
// tCrC(m,n) += tCsA(m,k) * tCsB(n,k);
// }
// }
// }
__syncthreads(); // Wait for all threads to read from smem
}
#endif
//
// Epilogue
//
axpby(alpha, tCrC, beta, tCgC);
// TUTORIAL: The above call to axpby(alpha, tCrC, beta, tCgC) is equivalent to
// CUTE_UNROLL
  //   for (int i = 0; i < size(tCrC); ++i) {
// tCgC(i) = alpha * tCrC(i) + beta * tCgC(i);
// }
}
// Setup params for an NT GEMM
// Use m-major smem sA, n-major smem sB, and mn-major threads tA|tB
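// Indexing implied by the NT strides below: A(m,k) lives at A[m + k*ldA] (m-major),
// B(n,k) at B[n + k*ldB] (n-major), and C(m,n) at C[m + n*ldC].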
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define NT strides (mixed)
auto dA = make_stride(Int<1>{}, ldA); // (dM, dK)
auto dB = make_stride(Int<1>{}, ldB); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
// Define the smem layouts (static)
auto sA = make_layout(make_shape(bM, bK)); // (m,k) -> smem_idx; m-major
auto sB = make_layout(make_shape(bN, bK)); // (n,k) -> smem_idx; n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major
// Define the thread layouts (static)
auto tA = make_layout(make_shape(Int<32>{}, Int< 8>{})); // (m,k) -> thr_idx
auto tB = make_layout(make_shape(Int<32>{}, Int< 8>{})); // (n,k) -> thr_idx
auto tC = make_layout(make_shape(Int<16>{}, Int<16>{})); // (m,n) -> thr_idx
dim3 dimBlock(size(tC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, tA,
B, dB, sB, tB,
C, dC, sC, tC,
alpha, beta);
}
// Setup params for a TN GEMM
// Use k-major smem sA, k-major smem sB, and k-major threads tA|tB
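// Indexing implied by the TN strides below: A(m,k) lives at A[m*ldA + k] and
// B(n,k) at B[n*ldB + k] (both k-major), while C(m,n) stays at C[m + n*ldC].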
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define TN strides (mixed)
auto dA = make_stride(ldA, Int<1>{}); // (dM, dK)
auto dB = make_stride(ldB, Int<1>{}); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
// Define the smem layouts (static)
auto sA = make_layout(make_shape(bM,bK), LayoutRight{}); // (m,k) -> smem_idx; k-major
auto sB = make_layout(make_shape(bN,bK), LayoutRight{}); // (n,k) -> smem_idx; k-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major
// Define the thread layouts (static)
auto tA = make_layout(make_shape(Int<32>{}, Int< 8>{}), LayoutRight{}); // (m,k) -> thr_idx; k-major
auto tB = make_layout(make_shape(Int<32>{}, Int< 8>{}), LayoutRight{}); // (n,k) -> thr_idx; k-major
auto tC = make_layout(make_shape(Int<16>{}, Int<16>{})); // (m,n) -> thr_idx; m-major
dim3 dimBlock(size(tC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, tA,
B, dB, sB, tB,
C, dC, sC, tC,
alpha, beta);
}
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
if (transA == 'N' && transB == 'T') {
return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
} else
if (transA == 'T' && transB == 'N') {
return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
}
assert(false && "Not implemented");
}
int main(int argc, char** argv)
{
int m = 5120;
if (argc >= 2)
sscanf(argv[1], "%d", &m);
int n = 5120;
if (argc >= 3)
sscanf(argv[2], "%d", &n);
int k = 4096;
if (argc >= 4)
sscanf(argv[3], "%d", &k);
char transA = 'N';
if (argc >= 5)
sscanf(argv[4], "%c", &transA);
char transB = 'T';
if (argc >= 6)
sscanf(argv[5], "%c", &transB);
using TA = float;
using TB = float;
using TC = float;
using TI = float;
TI alpha = 1.0;
TI beta = 0.0;
std::cout << "M = " << m << std::endl;
std::cout << "N = " << n << std::endl;
std::cout << "K = " << k << std::endl;
std::cout << "C = A^" << transA << " B^" << transB << std::endl;
cute::device_init(0);
thrust::host_vector<TA> h_A(m*k);
thrust::host_vector<TB> h_B(n*k);
thrust::host_vector<TC> h_C(m*n);
for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1);
thrust::device_vector<TA> d_A = h_A;
thrust::device_vector<TB> d_B = h_B;
thrust::device_vector<TC> d_C = h_C;
double gflops = (2.0*m*n*k) * 1e-9;
const int timing_iterations = 100;
GPU_Clock timer;
int ldA = 0, ldB = 0, ldC = m;
if (transA == 'N') {
ldA = m;
} else if (transA == 'T') {
ldA = k;
} else {
assert(false);
}
if (transB == 'N') {
ldB = k;
} else if (transB == 'T') {
ldB = n;
} else {
assert(false);
}
// Run once
d_C = h_C;
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
CUTE_CHECK_LAST();
thrust::host_vector<TC> cute_result = d_C;
// Timing iterations
timer.start();
for (int i = 0; i < timing_iterations; ++i) {
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
}
double cute_time = timer.seconds() / timing_iterations;
CUTE_CHECK_LAST();
printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
return 0;
}
| examples/cute/tutorial/sgemm_1.cu/0 | {
"file_path": "examples/cute/tutorial/sgemm_1.cu",
"repo_id": "examples",
"token_count": 8242
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/numeric/integral_constant.hpp> // cute::true_type, cute::false_type
#include <cute/numeric/integer_sequence.hpp>
#include <cute/container/cuda_types.hpp>
//#include <cute/container/array.hpp> // Advanced optimizations
//
// cute::tuple is like std::tuple, with two differences.
//
// 1. It works on both host and device.
// 2. Its template arguments must be semiregular types.
//
// Semiregular types are default constructible and copyable.
// They include "value types" like int or float,
// but do _not_ include references like int& or float&.
// (See std::tie for an example of a tuple of references.)
//
// This is simplified over the implementations in std::, cuda::std::, and thrust:: by ignoring much of
// the conversion SFINAE, special overloading, and avoiding cvref template types.
// Furthermore, the empty base optimization (EBO) is MORE aggressive by avoiding
// construction calls, and ignoring any need for unique element addresses.
//
// Over standard-conforming tuple implementations, this appears to accelerate compilation times by over 3x.
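// For example, cute::tuple<cute::Int<2>, int, float> is a valid cute::tuple, whereas a
// tuple of references such as cute::tuple<int&> is outside the intended use (references
// are not semiregular).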
namespace cute
{
namespace detail
{
// EBO stands for "empty base optimization."
// We use this technique to ensure that cute::tuple
// doesn't need to waste space storing any template arguments
// of cute::tuple that have no data (like integral_constant).
// Otherwise, cute::tuple would need to spend at least 1 byte
// for each of its template arguments.
//
// EBO always "holds" a single value of type T.
// N is like an array index that TupleBase uses
// to access the desired tuple element.
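// For example (illustrative): in cute::tuple<cute::Int<4>, int>, the Int<4> leaf is an
// empty base and contributes no storage, so one would expect
// sizeof(cute::tuple<cute::Int<4>, int>) == sizeof(int).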
template <size_t N, class T, bool IsEmpty = is_empty<T>::value>
struct EBO;
template <class T, size_t N, bool B>
CUTE_HOST_DEVICE constexpr C<N> findt(EBO<N, T, B> const&)
{ return {}; }
// Specialization for types T that have no data;
// the "static tuple leaf." Valid T here include
// integral_constant<U, Value>, Int<Value>,
// and any other semiregular type
// for which std::is_empty_v<T> is true.
template <size_t N, class T>
struct EBO<N, T, true>
{
CUTE_HOST_DEVICE constexpr
EBO() {}
CUTE_HOST_DEVICE constexpr
EBO(T const&) {}
};
template <size_t N, class T>
CUTE_HOST_DEVICE constexpr T getv(EBO<N, T, true> const&)
{ return {}; }
// Specialization for types T that are not empty;
// the "dynamic tuple leaf." Valid T here include int,
// any other integral or floating-point type,
// or any semiregular type for which std::is_empty_v<T> is false.
template <size_t N, class T>
struct EBO<N, T, false>
{
CUTE_HOST_DEVICE constexpr
EBO() : t_{} {}
template <class U>
CUTE_HOST_DEVICE constexpr
EBO(U const& u) : t_{u} {}
T t_;
};
template <size_t N, class T>
CUTE_HOST_DEVICE constexpr T const& getv(EBO<N, T, false> const& x)
{ return x.t_; }
template <size_t N, class T>
CUTE_HOST_DEVICE constexpr T& getv(EBO<N, T, false>& x)
{ return x.t_; }
template <size_t N, class T>
CUTE_HOST_DEVICE constexpr T&& getv(EBO<N, T, false>&& x)
{ return cute::move(x.t_); }
template <class IdxSeq, class... T>
struct TupleBase;
// Base class of cute::tuple binds each element to an index
// by inheriting from EBO<i, t> for each (i, t) in (I..., T...).
// The storage (for nonempty t) lives in the base classes.
template <size_t... I, class... T>
struct TupleBase<index_sequence<I...>, T...>
: EBO<I,T>...
{
CUTE_HOST_DEVICE constexpr
TupleBase() {}
template <class... U>
CUTE_HOST_DEVICE constexpr explicit
TupleBase(U const&... u)
: EBO<I,T>(u)... {}
template <class... U>
CUTE_HOST_DEVICE constexpr
TupleBase(TupleBase<index_sequence<I...>, U...> const& u)
: EBO<I,T>(getv(static_cast<EBO<I,U> const&>(u)))... {}
};
} // end namespace detail
// Attempting to use the following commented-out alias
// in the declaration of `struct tuple` causes MSVC 2022 build errors.
//
//template <class... T>
//using TupleBase = detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>;
// This is the actual cute::tuple class.
// The storage (if any) lives in TupleBase's EBO base classes.
//
// Inheriting from the above alias TupleBase
// causes MSVC 2022 build errors when assigning one tuple to another.
// In summary: this is verbose as a work-around for those MSVC build errors.
template <class... T>
struct tuple : detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>
{
CUTE_HOST_DEVICE constexpr
tuple() {}
template <class... U>
CUTE_HOST_DEVICE constexpr
tuple(U const&... u) : detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>(u...) {}
template <class... U>
CUTE_HOST_DEVICE constexpr
tuple(tuple<U...> const& u)
: detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>(static_cast<detail::TupleBase<make_index_sequence<sizeof...(U)>, U...> const&>(u)) {}
};
//
// get for cute::tuple (just like std::get for std::tuple)
//
template <size_t I, class... T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(tuple<T...> const& t) noexcept
{
static_assert(I < sizeof...(T), "Index out of range");
return detail::getv<I>(t);
}
template <size_t I, class... T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(tuple<T...>& t) noexcept
{
static_assert(I < sizeof...(T), "Index out of range");
return detail::getv<I>(t);
}
template <size_t I, class... T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(tuple<T...>&& t) noexcept
{
static_assert(I < sizeof...(T), "Index out of range");
return detail::getv<I>(static_cast<tuple<T...>&&>(t));
}
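// Usage sketch (illustrative only): cute::get behaves like std::get,
// returning a static leaf by value and a dynamic leaf by reference.
//
//   auto t = cute::make_tuple(cute::Int<2>{}, 7);
//   auto s = cute::get<0>(t);   // Int<2>, materialized from the static leaf
//   cute::get<1>(t) = 11;       // writes through the reference to the stored int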
//
// Find a type X within a cute::tuple.
// Requires X to be unique within the tuple.
// Returns its index as a static integer.
//
template <class X, class... T>
CUTE_HOST_DEVICE constexpr
auto
find(tuple<T...> const& t) noexcept
{
return detail::findt<X>(t);
}
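// Usage sketch (illustrative only): the index of the (unique) type is returned
// as a static integer, so it can feed further compile-time computation.
//
//   auto t   = cute::make_tuple(cute::Int<5>{}, cute::Int<3>{});
//   auto idx = cute::find<cute::Int<3>>(t);   // idx has type C<1>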
//
// Custom is_tuple trait simply checks for the existence of tuple_size<T>
// and assumes that std::get<I>(t) and std::tuple_element<I, T> are also available.
//
namespace detail {
template <class T>
auto has_tuple_size( T*) -> bool_constant<(0 <= tuple_size<T>::value)>;
auto has_tuple_size(...) -> false_type;
} // end namespace detail
template <class T>
struct is_tuple : decltype(detail::has_tuple_size((T*)0)) {};
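// Usage sketch (illustrative only): the trait is SFINAE-friendly, so types
// without a tuple_size specialization simply report false.
//
//   static_assert( cute::is_tuple<cute::tuple<int, float>>::value);
//   static_assert(!cute::is_tuple<int>::value);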
//
// make_tuple (value-based implementation)
//
template <class... T>
CUTE_HOST_DEVICE constexpr
tuple<T...>
make_tuple(T const&... t)
{
return {t...};
}
//
// tuple_cat concatenates multiple cute::tuple into a single cute::tuple,
// just like std::tuple_cat for std::tuple.
//
#if 0
// Original implementation
namespace detail {
template <class T0, class T1,
size_t... I0, size_t... I1>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1,
index_sequence<I0...>, index_sequence<I1...>)
{
return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)...);
}
} // end namespace detail
CUTE_HOST_DEVICE constexpr
tuple<>
tuple_cat()
{
return {};
}
template <class Tuple,
__CUTE_REQUIRES(is_tuple<Tuple>::value)>
CUTE_HOST_DEVICE constexpr
Tuple const&
tuple_cat(Tuple const& t)
{
return t;
}
template <class T0, class T1>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1)
{
return detail::tuple_cat(t0, t1,
make_index_sequence<tuple_size<T0>::value>{},
make_index_sequence<tuple_size<T1>::value>{});
}
template <class T0, class T1, class T2, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, Ts const&... ts)
{
return cute::tuple_cat(cute::tuple_cat(t0,t1),t2,ts...);
}
#endif
#if 1
// Extended implementation
namespace detail {
template <class T0, class T1,
size_t... I0, size_t... I1>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1,
index_sequence<I0...>, index_sequence<I1...>)
{
return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)...);
}
template <class T0, class T1, class T2,
size_t... I0, size_t... I1, size_t... I2>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2,
index_sequence<I0...>, index_sequence<I1...>, index_sequence<I2...>)
{
return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)..., get<I2>(t2)...);
}
template <class T0, class T1, class T2, class T3,
size_t... I0, size_t... I1, size_t... I2, size_t... I3>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3,
index_sequence<I0...>, index_sequence<I1...>, index_sequence<I2...>, index_sequence<I3...>)
{
return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)..., get<I2>(t2)..., get<I3>(t3)...);
}
template <class T0, class T1, class T2, class T3, class T4,
size_t... I0, size_t... I1, size_t... I2, size_t... I3, size_t... I4>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, T4 const& t4,
index_sequence<I0...>, index_sequence<I1...>, index_sequence<I2...>, index_sequence<I3...>, index_sequence<I4...>)
{
return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)..., get<I2>(t2)..., get<I3>(t3)..., get<I4>(t4)...);
}
template <class T0, class T1>
struct tuple_cat_static;
template <class... T0s, class... T1s>
struct tuple_cat_static<tuple<T0s...>, tuple<T1s...>> {
using type = tuple<T0s..., T1s...>;
};
} // end namespace detail
CUTE_HOST_DEVICE constexpr
tuple<>
tuple_cat()
{
return {};
}
template <class Tuple,
__CUTE_REQUIRES(is_tuple<Tuple>::value)>
CUTE_HOST_DEVICE constexpr
Tuple const&
tuple_cat(Tuple const& t)
{
return t;
}
template <class T0, class T1>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1)
{
if constexpr (is_static<T0>::value && is_static<T1>::value &&
is_tuple<T0>::value && is_tuple<T1>::value) {
return typename detail::tuple_cat_static<T0, T1>::type{};
} else {
return detail::tuple_cat(t0, t1,
make_index_sequence<tuple_size<T0>::value>{},
make_index_sequence<tuple_size<T1>::value>{});
}
CUTE_GCC_UNREACHABLE;
}
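// Usage sketch (illustrative only): when both arguments are fully static
// tuples, the result type is assembled directly from the template parameter
// packs and no per-element get/make_tuple calls are generated.
//
//   auto a = cute::make_tuple(cute::Int<1>{}, cute::Int<2>{});
//   auto b = cute::make_tuple(cute::Int<3>{});
//   auto c = cute::tuple_cat(a, b);   // tuple<Int<1>, Int<2>, Int<3>>, via tuple_cat_static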
template <class T0, class T1, class T2>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2)
{
return detail::tuple_cat(t0, t1, t2,
make_index_sequence<tuple_size<T0>::value>{},
make_index_sequence<tuple_size<T1>::value>{},
make_index_sequence<tuple_size<T2>::value>{});
}
template <class T0, class T1, class T2, class T3>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3)
{
return detail::tuple_cat(t0, t1, t2, t3,
make_index_sequence<tuple_size<T0>::value>{},
make_index_sequence<tuple_size<T1>::value>{},
make_index_sequence<tuple_size<T2>::value>{},
make_index_sequence<tuple_size<T3>::value>{});
}
template <class T0, class T1, class T2, class T3, class T4>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, T4 const& t4)
{
return detail::tuple_cat(t0, t1, t2, t3, t4,
make_index_sequence<tuple_size<T0>::value>{},
make_index_sequence<tuple_size<T1>::value>{},
make_index_sequence<tuple_size<T2>::value>{},
make_index_sequence<tuple_size<T3>::value>{},
make_index_sequence<tuple_size<T4>::value>{});
}
template <class T0, class T1, class T2, class T3, class T4, class T5, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, T4 const& t4, T5 const& t5, Ts const&... ts)
{
return cute::tuple_cat(cute::tuple_cat(t0,t1,t2,t3,t4), cute::tuple_cat(t5, ts...));
}
#endif
#if 0
// Outer-Inner indexing trick to concat all tuples at once
namespace detail {
template <size_t... Ns>
struct tuple_cat_helper
{
static constexpr cute::array<size_t,sizeof...(Ns)> ns = {Ns...};
static constexpr size_t total_size() {
size_t sum = 0;
for (size_t n : ns) sum += n;
return sum;
}
static constexpr size_t total_size_ = total_size();
static constexpr auto values() {
cute::array<size_t[2],total_size_> outer_inner = {};
size_t idx = 0;
for (size_t i = 0; i < ns.size(); ++i) {
for (size_t j = 0; j < ns[i]; ++j, ++idx) {
outer_inner[idx][0] = i;
outer_inner[idx][1] = j;
}
}
return outer_inner;
}
static constexpr auto outer_inner_ = values();
using total_sequence = make_index_sequence<total_size_>;
};
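// Worked example (illustrative only): tuple_cat_helper<2, 3> yields
// total_size_ == 5 and outer_inner_ == {{0,0},{0,1},{1,0},{1,1},{1,2}},
// i.e. output element k is element outer_inner_[k][1] of input tuple
// outer_inner_[k][0].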
template <class Helper, class Tuple, size_t... I>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(Tuple const& t, index_sequence<I...>)
{
return cute::make_tuple(get<Helper::outer_inner_[I][1]>(get<Helper::outer_inner_[I][0]>(t))...);
}
template <class T0, class T1,
size_t... I0, size_t... I1>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1,
index_sequence<I0...>, index_sequence<I1...>)
{
return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)...);
}
} // end namespace detail
CUTE_HOST_DEVICE constexpr
tuple<>
tuple_cat()
{
return {};
}
template <class Tuple,
__CUTE_REQUIRES(is_tuple<Tuple>::value)>
CUTE_HOST_DEVICE constexpr
Tuple const&
tuple_cat(Tuple const& t)
{
return t;
}
template <class T0, class T1>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(T0 const& t0, T1 const& t1)
{
return detail::tuple_cat(t0, t1,
make_index_sequence<tuple_size<T0>::value>{},
make_index_sequence<tuple_size<T1>::value>{});
}
template <class... Tuples>
CUTE_HOST_DEVICE constexpr
auto
tuple_cat(Tuples const&... ts)
{
using Helper = detail::tuple_cat_helper<tuple_size<Tuples>::value...>;
return detail::tuple_cat<Helper>(cute::make_tuple(ts...), typename Helper::total_sequence{});
}
#endif
//
// Equality operators
//
namespace detail {
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
equal_impl(TupleA const& a, TupleB const& b)
{
if constexpr (I == tuple_size<TupleA>::value) {
return cute::true_type{}; // Terminal: TupleA is exhausted
} else if constexpr (I == tuple_size<TupleB>::value) {
return cute::false_type{}; // Terminal: TupleA is not exhausted, TupleB is exhausted
} else {
return (get<I>(a) == get<I>(b)) && equal_impl<I+1>(a,b);
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class TupleT, class TupleU,
__CUTE_REQUIRES(is_tuple<TupleT>::value && is_tuple<TupleU>::value)>
CUTE_HOST_DEVICE constexpr
auto
operator==(TupleT const& t, TupleU const& u)
{
return detail::equal_impl<0>(t, u);
}
template <class TupleT, class TupleU,
__CUTE_REQUIRES(is_tuple<TupleT>::value ^ is_tuple<TupleU>::value)>
CUTE_HOST_DEVICE constexpr
auto
operator==(TupleT const& t, TupleU const& u)
{
return cute::false_type{};
}
template <class TupleT, class TupleU,
__CUTE_REQUIRES(is_tuple<TupleT>::value && is_tuple<TupleU>::value)>
CUTE_HOST_DEVICE constexpr
auto
operator!=(TupleT const& t, TupleU const& u)
{
return !(t == u);
}
template <class TupleT, class TupleU,
__CUTE_REQUIRES(is_tuple<TupleT>::value ^ is_tuple<TupleU>::value)>
CUTE_HOST_DEVICE constexpr
auto
operator!=(TupleT const& t, TupleU const& u)
{
return cute::true_type{};
}
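// Usage sketch (illustrative only): comparing tuples of static integers yields
// a static (compile-time) result, and comparing a tuple against a non-tuple is
// statically false via the mismatched overloads above.
//
//   auto eq = cute::make_tuple(cute::Int<1>{}, cute::Int<2>{})
//          == cute::make_tuple(cute::Int<1>{}, cute::Int<2>{});
//   static_assert(decltype(eq)::value);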
//
// Comparison operators
//
//
// There are many ways to compare tuples of elements, and because CuTe is built
// on parameterizing layouts of coordinates, some comparisons are appropriate
// only in certain cases.
// -- lexicographical comparison [reverse, reflected, revref]
// -- colexicographical comparison [reverse, reflected, revref]
// -- element-wise comparison [any,all]
// This can be very confusing. To avoid errors in selecting the appropriate
// comparison, op<|op<=|op>|op>= are *not* implemented for cute::tuple.
//
// That said, see int_tuple for more explicitly named common comparison ops.
//
//
// Display utilities
//
namespace detail {
template <class Tuple, size_t... Is>
CUTE_HOST_DEVICE void print_tuple(Tuple const& t,
index_sequence<Is...>, char s = '(', char e = ')')
{
using cute::print;
((void(print(Is == 0 ? s : ',')), void(print(get<Is>(t)))), ...); print(e);
}
#if !defined(__CUDACC_RTC__)
template <class Tuple, std::size_t... Is>
CUTE_HOST std::ostream& print_tuple_os(std::ostream& os, Tuple const& t,
index_sequence<Is...>, char s = '(', char e = ')')
{
(void(os << (Is == 0 ? s : ',') << get<Is>(t)), ...);
return os << e;
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace detail
template <class Tuple,
__CUTE_REQUIRES(is_tuple<Tuple>::value)>
CUTE_HOST_DEVICE void print(Tuple const& t)
{
return detail::print_tuple(t, make_index_sequence<tuple_size<Tuple>::value>{});
}
#if !defined(__CUDACC_RTC__)
template <class Tuple,
__CUTE_REQUIRES(is_tuple<Tuple>::value)>
CUTE_HOST std::ostream& operator<<(std::ostream& os, Tuple const& t)
{
return detail::print_tuple_os(os, t, make_index_sequence<tuple_size<Tuple>::value>{});
}
#endif // !defined(__CUDACC_RTC__)
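// Usage sketch (illustrative only): printing a tuple produces a parenthesized,
// comma-separated list; static integers print with their usual underscore prefix.
//
//   cute::print(cute::make_tuple(1, cute::Int<2>{}));   // prints "(1,_2)"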
} // end namespace cute
namespace CUTE_STL_NAMESPACE
{
template <class... T>
struct tuple_size<cute::tuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, cute::tuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>>
{};
template <class... T>
struct tuple_size<const cute::tuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, const cute::tuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>>
{};
} // end namespace CUTE_STL_NAMESPACE
//
// std compatibility
//
#ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD
namespace std
{
#if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;
template <size_t _Ip, class... _Tp>
struct tuple_element;
#endif
template <class... T>
struct tuple_size<cute::tuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, cute::tuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>>
{};
template <class... T>
struct tuple_size<const cute::tuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, const cute::tuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>>
{};
} // end namespace std
#endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
| include/cute/container/tuple.hpp/0 | {
"file_path": "include/cute/container/tuple.hpp",
"repo_id": "include",
"token_count": 8428
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/util.hpp> // cast_smem_ptr_to_uint
#include <cute/pointer.hpp>
#include <cute/pointer_swizzle.hpp>
#include <cute/swizzle_layout.hpp>
#include <cute/tensor.hpp>
namespace cute
{
//
// Stand-in Swizzle Layout
// A model of a nullptr smem_ptr<T> with B == sizeof_bits<T>::value
// that represents an unset pointer: a placeholder type waiting to be bound to an smem_ptr.
//
template <int Bits>
struct smem_ptr_flag_bits : Int<0> {};
using smem_ptr_flag = smem_ptr_flag_bits<1>;
// A flagged construction method to transform a ComposedLayout:
// make a swizzle-pointer tensor and check that the intended element size matches the flag's bit width.
template <class Iterator, class SwizzleFn, int B, class Layout>
CUTE_HOST_DEVICE constexpr
auto
make_tensor(Iterator const& ptr,
ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout)
{
static_assert(is_smem<Iterator>::value, "Expected smem.");
static_assert(B == sizeof_bits<iter_value_t<Iterator>>::value, "Expected a B-bit pointer type.");
return make_tensor(make_smem_ptr(ptr.get(), layout.layout_a()),
layout.layout_b());
}
// NOTE: To preserve smem_ptr_flag_bits under recast ops
template <int N, class SwizzleFn, int B, class Layout>
CUTE_HOST_DEVICE constexpr
auto
upcast(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout)
{
return composition(layout.layout_a(), smem_ptr_flag_bits<B*N>{}, upcast<N>(layout.layout_b()));
}
template <int N, class SwizzleFn, int B, class Layout>
CUTE_HOST_DEVICE constexpr
auto
downcast(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout)
{
return composition(layout.layout_a(), smem_ptr_flag_bits<B/N>{}, downcast<N>(layout.layout_b()));
}
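// Worked example (illustrative only): recasting scales the flag's bit width so
// the element-size check in make_tensor above still holds. For instance,
// upcast<2> of a layout flagged with smem_ptr_flag_bits<16> (16-bit elements)
// yields a layout flagged with smem_ptr_flag_bits<32>, and downcast<2>
// inverts that.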
//
// Conversion with swizzle_layout
//
template <class SwizzleFn, int B, class Layout>
CUTE_HOST_DEVICE
auto
as_position_independent_swizzle_layout(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout)
{
return composition(recast_layout<uint8_t,uint_bit_t<B>>(layout.layout_a()), Int<0>{}, layout.layout_b());
}
template <class Tensor>
CUTE_HOST_DEVICE
auto
as_position_independent_swizzle_tensor(Tensor&& tensor)
{
static_assert(is_smem<remove_cvref_t<Tensor>>::value, "Expected smem tensor.");
using SwizzleFn = get_swizzle_t<remove_cvref_t<Tensor>>;
if constexpr (SwizzleFn::num_bits == 0) {
return tensor;
} else {
#if !defined(NDEBUG)
{
uint32_t address = cast_smem_ptr_to_uint(raw_pointer_cast(static_cast<Tensor&&>(tensor).data()));
uint32_t mask = ((uint32_t(1) << SwizzleFn::num_base) - 1) | SwizzleFn::swizzle_code;
assert((address & mask) == 0); // Alignment to the Base, Z, and Y of Swizzle
}
#endif
using T = typename remove_cvref_t<Tensor>::value_type;
// Recast swizzle from acting on byte-addressed pointers to elements of type-T
auto new_swizzle = recast_layout<uint8_t, T>(SwizzleFn{});
// Strip off everything and create a new smem_ptr for type-T
auto new_ptr = make_smem_ptr<T>(raw_pointer_cast(static_cast<Tensor&&>(tensor).data()));
return make_tensor(new_ptr, composition(new_swizzle, Int<0>{}, tensor.layout()));
}
CUTE_GCC_UNREACHABLE;
}
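// Usage sketch (illustrative only; the tensor name and its layout are
// hypothetical): given a swizzled shared-memory tensor sA built elsewhere,
//
//   auto sA_pi = as_position_independent_swizzle_tensor(sA);
//
// folds the swizzle into the layout itself, so element offsets no longer depend
// on the (suitably aligned) base pointer. The debug-mode assert above checks
// exactly that alignment precondition.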
//
// Display utilities
//
// Capture and cast smem_ptr_flag Layouts to offset-0 layouts
template <class SwizzleFn, int B, class Layout>
CUTE_HOST_DEVICE
void
print_layout(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout)
{
print_layout(as_position_independent_swizzle_layout(layout));
}
template <class SwizzleFn, int B, class Layout>
CUTE_HOST_DEVICE
void
print_latex(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout)
{
print_latex(as_position_independent_swizzle_layout(layout));
}
template <int B>
CUTE_HOST_DEVICE void print(smem_ptr_flag_bits<B> ptr)
{
printf("smem_ptr[%db](unset)", B);
}
} // end namespace cute
| include/cute/pointer_flagged.hpp/0 | {
"file_path": "include/cute/pointer_flagged.hpp",
"repo_id": "include",
"token_count": 1960
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory added for SM75
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cute/arch/copy_sm75.hpp"
#include "cute/arch/util.hpp"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Layout of destination matrix (column-major implies transpose)
typename Layout,
/// .x1, .x2, or .x4
int MatrixCount
>
inline __device__ void ldsm(Array<unsigned, MatrixCount> & D, void const* ptr);
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Determine the appropriate way to target PTX's "ldmatrix" instruction.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) {
return cute::cast_smem_ptr_to_uint(ptr);
}
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) {
return cutlass_get_smem_pointer(const_cast<void *>(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
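/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only; assumes a __device__ context and warp-aligned
// thread indexing, and the variable names are hypothetical). ldmatrix is a
// warp-wide operation: for the .x1 form the first eight lanes supply the start
// addresses of the eight 16-byte rows, and every lane receives one 32-bit
// fragment of the loaded 8x8 b16 matrix.
//
//   __shared__ uint16_t smem[8 * 8];
//   cutlass::Array<unsigned, 1> frag;
//   cutlass::arch::ldsm<cutlass::layout::RowMajor, 1>(frag, smem + 8 * (threadIdx.x % 8));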
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Transpose on 16b granularity
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType, int Bytes>
struct shared_load_op {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
D = *reinterpret_cast<AccessType const *>(ptr);
}
};
template <typename AccessType>
CUTLASS_DEVICE void shared_load(AccessType &D, void const *ptr) {
shared_load_op<AccessType, int(sizeof(AccessType))>(D, ptr);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 16> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint4 v;
asm volatile ("ld.shared.v4.b32 {%0, %1, %2, %3}, [%4];" :
"=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 8> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint2 v;
asm volatile ("ld.shared.v2.b32 {%0, %1}, [%2];" :
"=r"(v.x), "=r"(v.y) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
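/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only; smem_ptr is a hypothetical, suitably aligned
// pointer into __shared__ memory): shared_load dispatches on sizeof(AccessType),
// so a 16-byte fragment takes the vectorized ld.shared.v4.b32 path above.
//
//   cutlass::Array<float, 4> frag;              // 16 bytes
//   cutlass::arch::shared_load(frag, smem_ptr);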
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/memory_sm75.h/0 | {
"file_path": "include/cutlass/arch/memory_sm75.h",
"repo_id": "include",
"token_count": 2780
} | 17 |