var searchData=
[
['half_5ft',['half_t',['../structcutlass_1_1half__t.html',1,'cutlass']]],
['hosttensor',['HostTensor',['../classcutlass_1_1HostTensor.html',1,'cutlass']]]
];
| docs/search/classes_7.js/0 | {
"file_path": "docs/search/classes_7.js",
"repo_id": "docs",
"token_count": 78
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector>
#include <iostream>
// Cutlass command line parser
#include "cutlass/util/command_line.h"
class Options {
public:
bool help;
bool good;
std::vector<int> extent; ///< extent of tile to fill
std::vector<int> stride; ///< stride vector for layout function
std::vector<int> output_shape; ///< output shape
int vectorize; ///< sequences of consecutive output elements are concatenated into a vector
/// if, and only if, they were consecutive in source memory
public:
/// Options
Options():
help(false),
good(true),
extent({32, 8}),
stride({32}),
output_shape({16, 8}),
vectorize(1) {
}
/// Constructs from command line parser
Options(cutlass::CommandLine const & cmd_line): help(false), good(true) {
if (cmd_line.check_cmd_line_flag("help") ||
cmd_line.check_cmd_line_flag("h")) {
help = true;
}
if (cmd_line.check_cmd_line_flag("extent")) {
cmd_line.get_cmd_line_arguments("extent", extent);
}
else {
extent = {32, 8};
}
if (cmd_line.check_cmd_line_flag("stride")) {
cmd_line.get_cmd_line_arguments("stride", stride);
}
int default_output_shape[] = {16, 8};
if (cmd_line.check_cmd_line_flag("output-shape")) {
cmd_line.get_cmd_line_arguments("output-shape", output_shape);
}
for (int i = int(output_shape.size()); i < 2; ++i) {
output_shape.push_back(default_output_shape[i]);
}
if (cmd_line.check_cmd_line_flag("vectorize")) {
cmd_line.get_cmd_line_argument("vectorize", vectorize);
}
else {
vectorize = 1;
}
if (output_shape.front() % vectorize) {
std::cerr << "Error: --vectorize=" << vectorize
<< " must divide contiguous elements in --output-shape="
<< output_shape.at(0) << "," << output_shape.at(1) << std::endl;
good = false;
}
}
/// Prints usage statement
static void print_usage(std::ostream &out) {
out
<< " Options:\n"
<< " --help Displays this help message.\n"
<< " --extent=<extent> Specifies the layout-specific extent (as comma-delimited array).\n"
<< " --stride=<stride> Specifies the layout-specific stride vector (comma-delimited array)\n"
<< " --output-shape=<extent> Specifies the dimensions of a row-major output matrix. \n"
<< " --vectorize=<vector length> If possible, vectorizes the output into vectors of consecutive elements\n";
}
};
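// --- Added usage sketch (not part of the original example) --------------------------------------
// A minimal, hypothetical illustration of how this Options class might be driven from an entry
// point. The function name `visualize_layout_main_sketch` and the early-exit behavior are
// assumptions, not the example's actual main().
inline int visualize_layout_main_sketch(int argc, char const **argv) {
  cutlass::CommandLine cmd_line(argc, argv);      // parse argv into flag/value pairs
  Options options(cmd_line);                      // populate extent/stride/output_shape/vectorize
  if (options.help) {
    Options::print_usage(std::cout);              // reached via --help or -h
    return 0;
  }
  if (!options.good) {
    return -1;                                    // e.g. --vectorize does not divide --output-shape
  }
  // The real example would construct a layout from options.extent / options.stride and print it
  // in tiles of options.output_shape, grouping options.vectorize consecutive elements.
  return 0;
}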
| examples/03_visualize_layout/options.h/0 | {
"file_path": "examples/03_visualize_layout/options.h",
"repo_id": "examples",
"token_count": 1586
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run convolution kernels using functions and data structures
provided by CUTLASS using tensor cores, which we run on an NVIDIA Turing GPU.
Writing a single high performance convolution kernel is hard but doable. Writing high
performance kernels at scale that work for multiple problem sizes with good abstractions,
however, is really hard. CUTLASS solves this problem by providing simplified abstractions
to compose the sections of an implicit GEMM kernel. When used properly, the kernels can
easily approach the peak performance of the GPU.
CUTLASS divides a kernel into hierarchical, composable sections: at the thread, warp, and
threadblock level, each computes its own tile size, with higher-level tile sizes composed
from lower-level ones. Multiple thread tiles (the tile size each thread computes) form a
warp tile (the tile size each warp computes), and multiple warp tiles form a threadblock
tile (the tile size computed by a threadblock).
In this example, we split variable initialization into two parts:
1. Setting up data properties: describes how tensors are laid out in memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties: describes how the above tensors will be used to compute
the output of the convolution.
First, we set up the data types of the input tensor A, the weight tensor B, and the output
tensor C, along with alpha and beta, since the equation for convolution is
C = alpha * Conv(A, B) + beta * C. In CUTLASS, the kernels first compute Conv(A, B) and defer
the rest of the computation to the end of the kernel, because alpha * X + beta * C is a simple
element-wise operation on X (= Conv(A, B)) and C. We call this the epilogue of the kernel.
Hence, we set the data types for alpha and beta to ElementComputeEpilogue = float. We want to
use MMA instructions on Turing, which support 4-bit signed integers. Since 4-bit integers are
not a native C++ type, CUTLASS provides cutlass::int4b_t, which we use as the element type of
the input tensors A and B. We convey this to the CUTLASS kernel by initializing the template
variables ElementAccumulator (int32_t), ElementComputeEpilogue (float), ElementInputA
(cutlass::int4b_t), ElementInputB (cutlass::int4b_t), and ElementOutput (cutlass::int4b_t).
Communicating just the data types is not enough. As the data is laid out linearly in memory,
we also have to convey the layout of the tensors. We do that by initializing the template
variables LayoutInputA, LayoutInputB and LayoutOutput to cutlass::layout::TensorNHWC. Next, we
set up the rules to compute the epilogue alpha * X + beta * C. We initialize the template
variable EpilogueOp, which takes the data type of the output ElementOutput (cutlass::int4b_t),
the number of elements per vectorized memory access (8), the data type of the accumulator
(int32_t), and the data type used to compute the linear combination (alpha * X + beta * C).
Now that we have set up the properties of the data, we have to set up the properties of the
computation. Second, we create template variables for the tile sizes of the threadblock, warp,
and MMA op: 128x128x128, 64x64x128, and 8x8x32 (MxNxK), respectively. When these are passed to
instantiate the CUTLASS implicit GEMM kernel, it internally deduces the number of threads needed
per threadblock, the amount of shared memory, how to store data in a bank-conflict-free manner,
and a ton of other variables required to compose, initialize and launch a high performance
implicit GEMM kernel. This is the beauty of CUTLASS: it relieves the developer from understanding
and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? An MMA
pipeline constitutes the whole process of loading input data from global memory to shared memory,
loading data from shared memory to registers, doing the matrix multiplication, and storing the
result to global memory. The sequence below shows a typical MMA pipeline.
tensor in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with a single pipeline is that each stage is synchronous, meaning each stage has to
wait until the previous one finishes executing. Some stages in the pipeline do not have fixed
latency, for example the loads from global memory and shared memory. Therefore, we can add one
more pipeline with a phase shift in the MMA kernel to hide the latency of the global and shared
memory loads. Finally, the pipelining in the kernel looks like this:
(1) tensor in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers ->
    (5) mma -> (6) registers -> (7) output to global memory
(1) <null> -> (2) <null> -> (3) tensor in global memory -> (4) registers ->
    (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers ->
    (9) output to global memory
This way, you can hide the latency of the second global memory load by doing computation on
already loaded input data.
A few more template variables are initialized, such as the swizzle that decides which threadblock
tile of the output matrix is computed by which threadblock launched on an SM, and the CUDA SM
architecture of the GPU you want to run on.
These are all put together to create a template variable which describes the CUTLASS implicit
GEMM kernel using the cutlass::conv::device::ImplicitGemmConvolution template.
The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel, and
run it. We use CUTLASS utilities to initialize, fill, and compare tensors, as they are simple and
don't get in the way of learning CUTLASS.
Once all the tensors are initialized and filled with data, we create the arguments tuple used to
launch the CUTLASS kernel. It takes the problem size (by default N = 1, H = 32, W = 32, C = 32),
the filter size (K = 32, R = 3, S = 3, C = 32), padding, strides, dilation, tensors, alpha, beta
and, importantly, the split k-dimension factor. Along with that, we query CUTLASS whether any
scratch-space memory is required by the kernel we instantiated. If so, we allocate it and pass it
along with the other arguments used to initialize the CUTLASS kernel; then the kernel is launched.
In this example, we then launch a reference convolution kernel (from the CUTLASS utilities) to
check whether the output of the CUTLASS kernel matches that of the reference implicit GEMM kernel.
*/
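/*
  Added note (for clarity, not part of the original comment): the "implicit GEMM" formulation used
  below maps a forward convolution with input NHWC, filter KRSC and output NPQK onto a GEMM with
    GEMM_M = N * P * Q,   GEMM_N = K,   GEMM_K = C * R * S,
  which is why the threadblock, warp and instruction tile sizes below are expressed as
  cutlass::gemm::GemmShape<> coordinates.
*/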
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = int32_t; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::int4b_t; // Data type of elements in input tensor
using ElementInputB = cutlass::int4b_t; // Data type of elements in input tensor
using ElementOutput = cutlass::int4b_t; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipeline stages to use (NumStages = 2 double-buffers the mainloop)
constexpr int NumStages = 2;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationClamp<
ElementOutput, // Data type of output matrix.
8, // The number of elements per vectorized memory access.
   // This becomes the vector width of math instructions
   // in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAddSaturate,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
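// Note: cutlass::conv::device::ImplicitGemmConvolution wraps the kernel in a host-side handle
// used below -- can_implement() validates the arguments, get_workspace_size() reports any
// scratch-space requirement, initialize() prepares the kernel, and operator() launches it.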
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of int4b_t elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 32 elements.
//
int const kAlignment = 32;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
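// Note: update() recomputes the padding as filter_size / 2 in each spatial dimension,
// i.e. "same"-style padding for odd filter sizes.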
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "09_turing_tensorop_conv2dfprop example\n\n"
<< " This example uses Turing's Tensor Core operators on int4 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
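// Worked example (added for clarity): for the default problem size above -- input N=1, H=32,
// W=32, C=32, filter K=32, R=3, S=3, padding 1, stride 1 -- the output is N=1,
// P=(32+1+1-3)/1+1=32, Q=32, K=32, so fmas = (1*32*32*32) * (3*3*32) = 9,437,184 and
// flops = 2 * fmas.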
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with zeros
cutlass::reference::host::TensorFill(
tensor_c.host_view());
// Fill tensor C for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_c.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_ref_c.sync_device();
//
// Define arguments for CUTLASS Convolution
//
// mode (kCrossCorrelation or kConvolution)
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partition
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_c.device_ref(),
{options.alpha, options.beta},
};
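// Note: tensor_c is passed twice above -- once as the source C operand and once as the
// destination D -- so the epilogue writes its result back into tensor_c in place.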
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
ElementOutput,
cutlass::NumericConverterClamp<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_c.host_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_c.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_c.host_view(),
tensor_ref_c.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "09_tensor_conv_workspace_conv2dfprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return 0;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 7 || (props.major == 7 && props.minor >= 5))) {
std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75."
<< std::endl;
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu/0 | {
"file_path": "examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu",
"repo_id": "examples",
"token_count": 10022
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "device/b2b_implicit_gemm_convolution.h"
#include "b2b_conv2d_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
cutlass::conv::Conv2dProblemSize conv2d_f16_sm75_problem_size_0 (
{32, 56, 56, 64}, // input size (NHWC)
{64, 3, 3, 64}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{32, 56, 56, 64} // output size (NPQK)
);
cutlass::conv::Conv2dProblemSize conv2d_f16_sm75_problem_size_1 (
{32, 56, 56, 64}, // input size (NHWC)
{128, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{32, 56, 56, 128} // output size (NPQK)
);
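// Note on the back-to-back problem sizes: the output of the first convolution ({32, 56, 56, 64},
// NPQK) is consumed directly as the input of the second, whose 1x1 filter has C = 64 matching the
// first convolution's K. Keeping the second filter 1x1 is what lets the fused kernel hold the
// intermediate activation tile on-chip between the two convolutions.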
bool run_nonfused_conv2d_fprop_optimized_f16_sm75() {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
ElementCompute beta0 = ElementCompute(1); //beta=1 for bias
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta=1 for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<32, 32, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Conv2dFpropKernel0 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape0,
WarpShape0,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
128 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop0 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel0>;
using Conv2dFpropKernel1 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape1,
WarpShape1,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
128 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop1 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel1>;
B2bNonFusedConv2dRun<Conv2dFprop0, Conv2dFprop1> nonFusedConv2d;
std::cout << "Running Non-fused back-to-back FP16 Optimized Convolution Fprops...\n";
bool pass = nonFusedConv2d.run(conv2d_f16_sm75_problem_size_0, conv2d_f16_sm75_problem_size_1, cutlass::conv::SplitKMode::kSerial,
alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
bool run_fused_conv2d_fprop_optimized_f16_sm75_rf_res() {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
//Fused kernel has built-in bias, setting beta=0
ElementCompute beta0 = ElementCompute(0);
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //use beta for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
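// Note: for the register-file-resident fused kernel, each warp tile spans the full N extent of
// its threadblock tile (WarpShape0 N == ThreadblockShape0 N and WarpShape1 N == ThreadblockShape1
// N), so the accumulator produced by the first convolution can stay in that warp's registers and
// feed the second convolution without a round trip through shared or global memory.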
using EpilogueOutputOp0 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
InstructionShape::kM * InstructionShape::kN / 32,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>;
using EpilogueOutputOp1 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
128 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>;
const bool SmemAccumulator = false;
using B2bConv2dFpropKernel = typename cutlass::conv::kernel::DefaultB2bConv2dFprop<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized,
SmemAccumulator
>::Kernel;
using B2bConv2dFprop = cutlass::conv::device::B2bImplicitGemmConvolution<B2bConv2dFpropKernel>;
B2bFusedConv2dRun<B2bConv2dFprop> fusedConv2d;
std::cout << "Running Fused back-to-back FP16 Optimized Convolution Fprops with RF Residency...\n";
bool pass = fusedConv2d.run(conv2d_f16_sm75_problem_size_0, conv2d_f16_sm75_problem_size_1, cutlass::conv::SplitKMode::kSerial,
alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
int main() {
std::vector<bool (*)()>funcs = {
&run_nonfused_conv2d_fprop_optimized_f16_sm75,
&run_fused_conv2d_fprop_optimized_f16_sm75_rf_res
};
return testRun(75, funcs, "conv f16 RF residency");
}
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/fused_two_convs_f16_sm75_rf.cu/0 | {
"file_path": "examples/13_two_tensor_op_fusion/fused_two_convs_f16_sm75_rf.cu",
"repo_id": "examples",
"token_count": 3348
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS SYRK kernel and provides a naive reference
kernel to verify its correctness.
The CUTLASS SYRK template is instantiated in the function CutlassSsyrkNN. This kernel computes
the symmetric rank-k update (SYRK) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 16x32x16 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SSYRK kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
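/*
  For reference, the SYRK operation computed here is the symmetric rank-K update
    C = alpha * A * A^T + beta * C
  where A is an N-by-K column-major matrix and only the lower triangle of the N-by-N matrix C
  is referenced and updated (FillMode::kLower below).
*/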
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision SYRK kernel
//
// Defines cutlass::gemm::device::Syrk, the generic Syrk computation template class.
#include "cutlass/gemm/device/rank_k.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS SYRK kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS SYRK template and launch a SYRK kernel.
cudaError_t CutlassSsyrkNN(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
// Define type definition for double-precision CUTLASS SYRK with column-major
// input matrices and 16x32x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision SYRK. Typical values are used as
// default template arguments.
//
// To view the full syrk device API interface, see `cutlass/gemm/device/syrk.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassSyrk = cutlass::gemm::device::RankK<
double,
ColumnMajor,
double,
ColumnMajor,
cutlass::FillMode::kLower,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<16, 32, 16>,
cutlass::gemm::GemmShape<16, 16, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
5, // Stages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAdd,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kSymmetric
>;
// Define a CUTLASS SYRK type
CutlassSyrk syrk_operator;
// Construct the CUTLASS SYRK arguments object.
//
// One of CUTLASS's design patterns is to define syrk argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Syrk and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassSyrk::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{N, N, K}, // Syrk Problem dimensions
1, // batch_count,
{alpha, beta}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
const_cast<void *>(reinterpret_cast<void *>(C)),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)N*K, // Batch strides
(int64_t)N*N,
(int64_t)N*N,
lda,
ldc,
ldc);
//
// Launch the CUTLASS SYRK kernel.
//
cutlass::Status status = syrk_operator(args);
//
// Return a cudaError_t if the CUTLASS SYRK operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference SYRK computation.
__global__ void ReferenceSyrk_kernel(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N && i >= j ) { // Since C is in Lower Fill Mode
double accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * A[j + k * lda];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference SYRK computation.
cudaError_t ReferenceSyrk(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(N + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceSyrk_kernel<<< grid, block >>>(N, K, alpha, A, lda, beta, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS SYRK kernel.
cudaError_t TestCutlassSyrk(int N, int K, double alpha, double beta) {
cudaError_t result;
//
// Define several matrices to be used as operands to SYRK kernels.
//
// Compute leading dimensions for each matrix.
int lda = N;
int ldc = N;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, N, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, N, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_reference, ldc, N, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Launch CUTLASS SYRK.
//
result = CutlassSsyrkNN(N, K, alpha, A, lda, beta, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS SYRK kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference SYRK
result = ReferenceSyrk(N, K, alpha, A, lda, beta, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference SYRK kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS SYRK results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference SYRK results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_syrk example.
//
// usage:
//
// 31_basic_syrk <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain SYRK dimensions and scalar values.
//
// SYRK problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[2] = { 1, 0 };
for (int i = 3; i < argc && i < 5; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS SYRK test.
//
cudaError_t result = TestCutlassSyrk(
problem[0], // SYRK N dimension
problem[1], // SYRK K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/31_basic_syrk/basic_syrk.cu/0 | {
"file_path": "examples/31_basic_syrk/basic_syrk.cu",
"repo_id": "examples",
"token_count": 5258
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief A file containing all the functioning classes needed by GemmLayernorm.
GemmLayernorm example = GEMM0 with partial reduction fused in epilogue (EpilogueVisitorLayerNorm)
+ lightweight full reduction kernel (ApplyFinalReduction)
+ GEMM1 with elementwise operations fused in mainloop (GemmLayernormMainloopFusion)
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <cmath>
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/device/gemm_layernorm_mainloop_fusion.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "gemm_with_epilogue_visitor.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementVariance_,
typename ElementMean_,
typename ElementLayernormCompute_,
typename ElementOutput,
typename ThreadblockShape_,
bool IsShiftedVariance_ = false
>
class ApplyFinalReduction {
public:
using ElementVariance = ElementVariance_;
using ElementMean = ElementMean_;
using ElementLayernormCompute = ElementLayernormCompute_;
using ThreadblockShape = ThreadblockShape_;
// Pre-processing has ensured the layout is equivalent to RowMajor
using Layout = cutlass::layout::RowMajor;
using TensorVariance = TensorRef<ElementVariance, Layout>;
using TensorMean = TensorRef<ElementMean, Layout>;
static bool const kIsShiftedVariance = IsShiftedVariance_;
//
// Arguments
//
struct Arguments {
MatrixCoord extent; ///< Extent of D and Layernorm matrices
TensorVariance ref_Variance; ///< Sum Square or Variance tensor (input / output)
TensorMean ref_Mean; ///< Sum or Mean tensor (input / output)
ElementOutput *ptr_Shifted_K; ///< Shifted K tensor pointer
//
// Methods
//
Arguments(){ }
Arguments(
MatrixCoord extent_,
TensorVariance ref_Variance_,
TensorMean ref_Mean_,
ElementOutput *ptr_Shifted_K_
):
extent(extent_),
ref_Variance(ref_Variance_),
ref_Mean(ref_Mean_),
ptr_Shifted_K(ptr_Shifted_K_)
{
}
};
struct SharedStorage {
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
private:
public:
CUTLASS_DEVICE
ApplyFinalReduction() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
  /// Performs the final reduction over the per-threadblock partial sums
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int threadblock_num = (params.args.extent.column() + ThreadblockShape::kM - 1) / ThreadblockShape::kM;
int block_n = blockIdx.x * blockDim.x;
int thread_n = threadIdx.x;
int idx_n = block_n + thread_n;
if (idx_n >= params.args.extent.row()) {
return;
}
using ConvertVarianceOutput = cutlass::NumericConverter<ElementVariance, ElementLayernormCompute>;
using ConvertMeanOutput = cutlass::NumericConverter<ElementMean, ElementLayernormCompute>;
using ConvertVariance = cutlass::NumericConverter<ElementLayernormCompute, ElementVariance>;
using ConvertMean = cutlass::NumericConverter<ElementLayernormCompute, ElementMean>;
using ConvertShiftK = cutlass::NumericConverter<ElementLayernormCompute, ElementOutput>;
ConvertVariance convert_variance;
ConvertMean convert_mean;
ConvertVarianceOutput convert_variance_output;
ConvertMeanOutput convert_mean_output;
ElementVariance *access_square = params.args.ref_Variance.data() + idx_n;
ElementMean *access_mean = params.args.ref_Mean.data() + idx_n;
ElementVariance *access_square_bak = access_square;
ElementMean *access_mean_bak = access_mean;
ElementLayernormCompute frag_square_sum = ElementLayernormCompute(0);
ElementLayernormCompute frag_element_sum = ElementLayernormCompute(0);
ElementVariance fetch_square;
ElementMean fetch_mean;
CUTLASS_PRAGMA_UNROLL
for (int idx_m = 0; idx_m < threadblock_num; idx_m++) {
arch::global_load<ElementVariance, sizeof(ElementVariance)>(fetch_square, access_square, true);
arch::global_load<ElementMean, sizeof(ElementMean)>(fetch_mean, access_mean, true);
frag_element_sum += convert_mean(fetch_mean);
frag_square_sum += convert_variance(fetch_square);
access_square += params.args.extent.row();
access_mean += params.args.extent.row();
}
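    // frag_element_sum and frag_square_sum now hold E[x] and E[x^2] for the full row: the GEMM0
    // epilogue already scaled each per-threadblock partial sum by the reciprocal of the layernorm
    // dimension, so this kernel only needs to add the per-threadblock contributions together.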
ElementLayernormCompute mean = frag_element_sum;
ElementLayernormCompute square_mean = frag_square_sum;
ElementLayernormCompute variance;
if (kIsShiftedVariance && params.args.ptr_Shifted_K != nullptr) {
ElementOutput *access_shift_k = params.args.ptr_Shifted_K + idx_n;
ElementOutput fetch_shift_k;
ConvertShiftK convert_shift_k;
arch::global_load<ElementOutput, sizeof(ElementOutput)>(fetch_shift_k, access_shift_k, true);
ElementLayernormCompute shifted_mean = mean - convert_shift_k(fetch_shift_k);
variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - shifted_mean * shifted_mean + ElementLayernormCompute(1e-6));
    } else {
variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - mean * mean + ElementLayernormCompute(1e-6));
}
mean = -mean * variance;
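    // Note that 'variance' holds the inverse standard deviation 1 / sqrt(Var[x] + eps) and 'mean'
    // has been folded into -E[x] * inv_std, so the fused second GEMM can apply layernorm as a
    // single scale-and-bias: x * inv_std + (-E[x] * inv_std) = (x - E[x]) * inv_std.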
access_square = access_square_bak;
access_mean = access_mean_bak;
access_square[0] = convert_variance_output(variance);
access_mean[0] = convert_mean_output(mean);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadblockShape_,
int ThreadCount,
typename OutputTileIterator_,
typename AccumulatorTile_,
typename ElementAccumulator_,
typename ElementVariance_,
typename ElementMean_,
typename ElementLayernormCompute_,
typename ElementwiseFunctor_,
bool IsShiftedVariance_ = false
>
class EpilogueVisitorLayerNorm {
public:
using ElementVariance = ElementVariance_;
using ElementMean = ElementMean_;
using ElementLayernormCompute = ElementLayernormCompute_;
using AccumulatorTile = AccumulatorTile_;
using ThreadblockShape = ThreadblockShape_;
static int const kThreadCount = ThreadCount;
using OutputTileIterator = OutputTileIterator_;
using ElementwiseFunctor = ElementwiseFunctor_;
static int const kIterations = OutputTileIterator::kIterations;
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
static int const kRowIterations = OutputTileIterator::ThreadMap::Iterations::kRow;
static int const kThreads = OutputTileIterator::ThreadMap::kThreads;
static bool const kIsShiftedVariance = IsShiftedVariance_;
using ElementOutput = typename OutputTileIterator::Element;
static int const kDeltaRow = OutputTileIterator::ThreadMap::Delta::kRow;
/// Array type used in Shift-K Layernorm
static int const kRowAccessCount = kIterations * kRowIterations;
using ConvertedShiftFragment = Array<ElementLayernormCompute, kRowAccessCount>;
// Conducts manual transpose externally (already supported) for column major
using LayoutOutput = cutlass::layout::RowMajor;
using ElementAccumulator = ElementAccumulator_;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
using LayernormFragment = Array<ElementLayernormCompute, kElementsPerAccess>;
using OutputVector = Array<ElementOutput, kElementsPerAccess>;
using TensorRefD = TensorRef<ElementOutput, LayoutOutput>;
static int const kThreadsPerRow = OutputTileIterator::ThreadMap::Detail::RowArrangement::Detail::kShapeWidth;
static int const kThreadsInColumn = kThreads / kThreadsPerRow;
static int const kHalfThreadsPerRow = (kThreadsPerRow >> 1);
/// Argument structure
struct Arguments {
typename ElementwiseFunctor::Params elementwise;
TensorRefD ref_C;
TensorRefD ref_D;
ElementVariance *ptr_Variance;
ElementMean *ptr_Mean;
ElementOutput *ptr_Shifted_K;
//
// Methods
//
Arguments():
ptr_Variance(nullptr),
ptr_Mean(nullptr),
ptr_Shifted_K(nullptr)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_,
TensorRefD ref_C_,
TensorRefD ref_D_,
ElementVariance *ptr_Variance,
ElementMean *ptr_Mean_,
ElementOutput *ptr_Shifted_K_ = nullptr
):
elementwise(elementwise_),
ref_C(ref_C_),
ref_D(ref_D_),
ptr_Variance(ptr_Variance),
ptr_Mean(ptr_Mean_),
ptr_Shifted_K(ptr_Shifted_K_)
{
}
};
struct Params {
typename ElementwiseFunctor::Params elementwise;
typename OutputTileIterator::Params params_C;
typename OutputTileIterator::Params params_D;
typename OutputTileIterator::Element *ptr_C;
typename OutputTileIterator::Element *ptr_D;
ElementVariance *ptr_Variance;
ElementMean *ptr_Mean;
ElementOutput *ptr_Shifted_K;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_D(nullptr),
ptr_Variance(nullptr),
ptr_Mean(nullptr)
{
}
CUTLASS_HOST_DEVICE
Params(Arguments const &args):
elementwise(args.elementwise),
params_C(args.ref_C.layout()),
params_D(args.ref_D.layout()),
ptr_C(args.ref_C.data()),
ptr_D(args.ref_D.data()),
ptr_Variance(args.ptr_Variance),
ptr_Mean(args.ptr_Mean),
ptr_Shifted_K(args.ptr_Shifted_K)
{
}
};
/// Shared storage
struct SharedStorage {
};
private:
Params const & params_;
SharedStorage & shared_storage_;
MatrixCoord extent_;
ElementwiseFunctor elementwise_;
OutputTileIterator iterator_C_;
OutputTileIterator iterator_D_;
typename OutputTileIterator::Fragment fragment_C_;
typename OutputTileIterator::Fragment fragment_D_;
ElementAccumulator alpha_;
ElementAccumulator beta_;
ConvertedShiftFragment shift_k_frag_;
ElementLayernormCompute accum_sum_square_;
ElementLayernormCompute accum_sum_element_;
MatrixCoord thread_offset_;
public:
CUTLASS_DEVICE
EpilogueVisitorLayerNorm(
Params const ¶ms, ///< Parameters routed to the epilogue
SharedStorage &shared_storage, ///< Shared storage needed by the functors here
MatrixCoord const &problem_size0, ///< Problem size of the output
int thread_idx, ///< Thread index within the threadblock
int warp_idx, ///< Warp index within the threadblock
int lane_idx, ///< Lane index within the warp
MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)
):
params_(params),
shared_storage_(shared_storage),
extent_(problem_size0),
elementwise_(params.elementwise),
iterator_C_(params.params_C, params.ptr_C, problem_size0, thread_idx, threadblock_offset),
iterator_D_(params.params_D, params.ptr_D, problem_size0, thread_idx, threadblock_offset)
{
alpha_ = (params.elementwise.alpha_ptr ? *params.elementwise.alpha_ptr : params.elementwise.alpha);
beta_ = (params.elementwise.beta_ptr ? *params.elementwise.beta_ptr : params.elementwise.beta);
if (beta_ == ElementAccumulator()) {
iterator_C_.clear_mask();
}
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
// If shift-K feature is enabled, we load shift-k fragment
// at the very beginning of an epilogue
if (kIsShiftedVariance && params_.ptr_Shifted_K != nullptr) {
shift_k_frag_.clear();
int thread_offset_row_base = iterator_D_.thread_start_row();
CUTLASS_PRAGMA_UNROLL
for (int iter_idx = 0; iter_idx < kIterations; ++iter_idx) {
int step_offset = iter_idx * OutputTileIterator::Shape::kRow;
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < kRowIterations; ++rid) {
int row_step_offset = rid * kDeltaRow;
int row_offset = thread_offset_row_base + step_offset + row_step_offset;
bool is_load = (row_offset < extent_.row());
shift_k_frag_[iter_idx * kRowIterations + rid] = load_shift_k_(row_offset, is_load);
}
}
}
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
fragment_D_.clear();
if (elementwise_.kScale != cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
fragment_C_.clear();
iterator_C_.load(fragment_C_);
++iterator_C_;
}
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
using Mul = cutlass::multiplies<ElementLayernormCompute>;
using Minus = cutlass::minus<ElementLayernormCompute>;
using Exp = cutlass::fast_exp_op<ElementLayernormCompute>;
[[maybe_unused]] Minus minus;
[[maybe_unused]] Mul mul;
[[maybe_unused]] Exp exponential;
LayernormFragment result;
thread_offset_ =
iterator_D_.thread_start() +
OutputTileIterator::ThreadMap::iteration_offset(frag_idx);
NumericArrayConverter<ElementLayernormCompute, ElementOutput, kElementsPerAccess> source_converter;
OutputVector &source_vector = reinterpret_cast<OutputVector *>(&fragment_C_)[frag_idx];
bool column_guard = (thread_offset_.column() < extent_.column());
if (elementwise_.kScale == cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
result = source_converter(elementwise_(accum));
    } else {
result = source_converter(elementwise_(accum, source_vector));
}
ElementLayernormCompute inv_scalar = cutlass::constants::one<ElementLayernormCompute>() / ElementLayernormCompute(extent_.column());
// Fragment is cleared for non-reachable columns so no need to check against column guard
accum_sum_element_ = element_sum_accumulator_(result);
    // The square sum is different: with shift-K enabled, a non-reachable (cleared) column would
    // contribute (0 - shift_k)^2 = k^2 to the square sum, so the column guard is required here.
if (column_guard) {
accum_sum_square_ = (kIsShiftedVariance) ? \
square_sum_accumulator_(result, shift_k_frag_[iter_idx * kRowIterations + row_idx]) : \
square_sum_accumulator_(result);
}
else {
accum_sum_square_ = ElementLayernormCompute(0);
}
accum_sum_element_ *= inv_scalar;
accum_sum_square_ *= inv_scalar;
// After performing the in-thread reduction, we then perform cross-thread / in-warp reduction
CUTLASS_PRAGMA_UNROLL
for (int i = kHalfThreadsPerRow; i > 0; i >>= 1) {
accum_sum_element_ += __shfl_xor_sync(0xFFFFFFFF, accum_sum_element_, i);
accum_sum_square_ += __shfl_xor_sync(0xFFFFFFFF, accum_sum_square_, i);
}
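    // After the butterfly exchange, every thread in the same row group holds the same reduced
    // partial mean / mean-of-squares for this threadblock's slice of the row.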
// Convert to the output
NumericArrayConverter<ElementOutput, ElementLayernormCompute, kElementsPerAccess> output_converter;
OutputVector &output = reinterpret_cast<OutputVector *>(&fragment_D_)[frag_idx];
output = output_converter(result);
}
  /// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
using ConvertVarianceOutput = cutlass::NumericConverter<ElementVariance, ElementLayernormCompute>;
using ConvertMeanOutput = cutlass::NumericConverter<ElementMean, ElementLayernormCompute>;
ConvertVarianceOutput convert_variance_output;
ConvertMeanOutput convert_mean_output;
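    // Only the first thread of each row group (and only for in-bounds rows) writes the partial
    // results; blockIdx.y selects this threadblock's slice of the Variance / Mean workspace, and
    // ApplyFinalReduction later accumulates across the slices.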
bool is_write_thread = (thread_offset_.row() < extent_.row() && (threadIdx.x % kThreadsPerRow) == 0);
int row_offset = thread_offset_.row() + blockIdx.y * extent_.row();
ElementVariance *curr_ptr_sum_square = params_.ptr_Variance + row_offset;
ElementMean *curr_ptr_element_sum = params_.ptr_Mean + row_offset;
arch::global_store<ElementVariance, sizeof(ElementVariance)>(
convert_variance_output(accum_sum_square_),
(void *)curr_ptr_sum_square,
is_write_thread);
arch::global_store<ElementMean, sizeof(ElementMean)>(
convert_mean_output(accum_sum_element_),
(void *)curr_ptr_element_sum,
is_write_thread);
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
iterator_D_.store(fragment_D_);
++iterator_D_;
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
private:
CUTLASS_DEVICE
ElementLayernormCompute load_shift_k_(int row_offset, bool is_load) {
using ConvertShiftK = cutlass::NumericConverter<ElementLayernormCompute, ElementOutput>;
ConvertShiftK convert_shift_k;
ElementOutput shift_k_val;
// Computes the address to load shift_k element
ElementOutput *curr_ptr_shift_k = params_.ptr_Shifted_K + row_offset;
// Conditionally loads from global memory
arch::global_load<ElementOutput, sizeof(ElementOutput)>(shift_k_val, (void *)curr_ptr_shift_k, is_load);
// Converts data type to return
ElementLayernormCompute converted_shift_k_val = convert_shift_k(shift_k_val);
return converted_shift_k_val;
}
CUTLASS_DEVICE
ElementLayernormCompute square_sum_accumulator_(LayernormFragment const &accum) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
auto accum_ = accum[i];
sum_ += accum_ * accum_;
}
return sum_;
}
CUTLASS_DEVICE
ElementLayernormCompute square_sum_accumulator_(LayernormFragment const &accum, ElementLayernormCompute shift_k_val) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
auto accum_ = accum[i] - shift_k_val;
sum_ += accum_ * accum_;
}
return sum_;
}
CUTLASS_DEVICE
ElementLayernormCompute element_sum_accumulator_(LayernormFragment const &accum) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
sum_ += accum[i];
}
return sum_;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
///
template <
typename ElementInputA0_,
typename LayoutInputA0_,
typename ElementInputB0_,
typename LayoutInputB0_,
typename ElementOutput_,
typename LayoutOutput_,
typename ElementCompute_,
typename EpilogueFunctorOp_,
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
int Stages0,
int Stages1,
bool IsShiftedVariance_ = false
>
class GemmLayernorm {
public:
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Type definitions
//
static bool const kInternalTranspose = cutlass::platform::is_same<LayoutOutput_, cutlass::layout::ColumnMajor>::value;
static bool const kIsShiftedVariance = IsShiftedVariance_;
  // This is a mandatory layout.
using LayoutInputScaleBias = cutlass::layout::RowMajor;
// These are mandatory data types.
using ElementLayernormCompute = float;
using ElementInputScaleBias = cutlass::half_t;
// These are mandatory params required by mainloop fusion
using OperatorClass = cutlass::arch::OpClassTensorOp;
using ArchTag = cutlass::arch::Sm80;
  // These are mandatory layouts and data types
  // that are inherited from the pre-defined params
using LayoutSumSqr = LayoutInputScaleBias;
using LayoutSum = LayoutInputScaleBias;
using ElementMean = ElementInputScaleBias;
using ElementVariance = ElementInputScaleBias;
///////////////////////////////////////////////////////////////////////////////////////////////
using LayoutInputA0 = LayoutInputA0_;
using LayoutInputB0 = LayoutInputB0_;
using LayoutInputA1 = LayoutOutput_;
using LayoutInputB1 = LayoutOutput_;
using LayoutOutputC0 = LayoutOutput_;
using LayoutOutputC1 = LayoutOutput_;
using ElementInputA0 = ElementInputA0_;
using ElementInputB0 = ElementInputB0_;
using ElementOutputC0 = ElementOutput_;
using ElementCompute = ElementCompute_;
using ElementInputB1 = ElementInputB0_;
using ElementInputA1 = ElementOutputC0;
using ElementOutputC1 = ElementOutputC0;
using EpilogueFunctorOp = EpilogueFunctorOp_;
using TensorRefA = TensorRef<ElementInputA0, LayoutInputA0>;
using TensorRefB = TensorRef<ElementInputB0, LayoutInputB0>;
using TensorRefC = TensorRef<ElementOutputC0, LayoutOutputC0>;
using TensorVariance = TensorRef<ElementVariance, LayoutSumSqr>;
using TensorMean = TensorRef<ElementMean, LayoutSum>;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
static int const kStages0 = Stages0;
static int const kStages1 = Stages1;
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
///////////////////////////////////////////////////////////////////////////////////////////////
using MapArguments = cutlass::gemm::kernel::detail::MapArguments<
ElementInputA0,
LayoutInputA0,
cutlass::ComplexTransform::kNone,
128 / cutlass::sizeof_bits<ElementInputA0>::value,
ElementInputB0,
LayoutInputB0,
cutlass::ComplexTransform::kNone,
128 / cutlass::sizeof_bits<ElementInputB0>::value,
LayoutOutputC0,
kInternalTranspose
>;
using DefaultGemmKernel = typename cutlass::gemm::kernel::DefaultGemm<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementOutputC0,
typename MapArguments::LayoutC,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
SwizzleThreadBlock,
kStages0,
true,
typename cutlass::gemm::device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementInputA0, ElementInputB0, ElementOutputC0, ElementCompute>::Operator,
cutlass::gemm::SharedMemoryClearOption::kNone
>::GemmKernel;
///////////////////////////////////////////////////////////////////////////////////////////////
// Epilogue visitor
using EpilogueVisitor = kernel::EpilogueVisitorLayerNorm<
ThreadblockShape,
DefaultGemmKernel::kThreadCount,
typename DefaultGemmKernel::Epilogue::OutputTileIterator,
typename DefaultGemmKernel::Epilogue::AccumulatorFragmentIterator::AccumulatorTile,
ElementCompute,
ElementVariance,
ElementMean,
ElementLayernormCompute,
EpilogueFunctorOp,
kIsShiftedVariance
>;
/// Epilogue
using Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue<
EpilogueVisitor,
typename DefaultGemmKernel::Epilogue
>::Epilogue;
// GEMM
using GemmEpilogueFusion = gemm::kernel::GemmWithEpilogueVisitor<
typename DefaultGemmKernel::Mma,
Epilogue,
SwizzleThreadBlock
>;
using ApplyFinalReductionKernel = kernel::ApplyFinalReduction<
ElementVariance,
ElementMean,
ElementLayernormCompute,
ElementOutputC0,
ThreadblockShape,
kIsShiftedVariance
>;
using GemmMainloopFusion = typename cutlass::gemm::device::GemmLayernormMainloopFusion<
ElementInputA1, LayoutInputA1,
ElementInputB1, LayoutInputB1,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutputC1, LayoutOutputC1,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
SwizzleThreadBlock,
kStages1
>;
public:
/// Arguments class
struct Arguments {
typename GemmEpilogueFusion::Arguments gemm0;
typename GemmMainloopFusion::Arguments gemm1;
typename ApplyFinalReductionKernel::Arguments reduction;
cutlass::gemm::GemmCoord extend;
//
// Methods
//
Arguments() { }
Arguments(
cutlass::gemm::GemmCoord problem_size0,
cutlass::gemm::GemmCoord problem_size1,
ElementInputA0 * ptr_A,
ElementInputB0 * ptr_B,
ElementOutputC0 * ptr_C,
ElementOutputC0 * ptr_D,
ElementOutputC0 * ptr_E,
ElementOutputC0 * ptr_O,
int64_t ldm_A,
int64_t ldm_B,
int64_t ldm_C,
int64_t ldm_D,
int64_t ldm_E,
int64_t ldm_O,
typename EpilogueFunctorOp::Params linear_scaling,
TensorVariance ref_Variance_,
TensorMean ref_Mean_,
TensorVariance ref_Gamma_,
TensorMean ref_Beta_,
ElementOutputC0 *ptr_Shifted_K = nullptr
):
gemm0(
cutlass::gemm::GemmUniversalMode::kGemm,
{kInternalTranspose ? problem_size0.n() : problem_size0.m(),\
kInternalTranspose ? problem_size0.m() : problem_size0.n(),\
problem_size0.k()},
{kInternalTranspose ? ptr_B : ptr_A, \
kInternalTranspose ? ldm_B : ldm_A},
{kInternalTranspose ? ptr_A : ptr_B, \
kInternalTranspose ? ldm_A : ldm_B},
typename EpilogueVisitor::Arguments(
linear_scaling,
{ptr_C, ldm_C},
{ptr_D, ldm_D},
ref_Variance_.data(),
ref_Mean_.data(),
ptr_Shifted_K
)
),
reduction(
MatrixCoord(kInternalTranspose ? problem_size0.n() : problem_size0.m(),\
kInternalTranspose ? problem_size0.m() : problem_size0.n()),
ref_Variance_,
ref_Mean_,
ptr_Shifted_K
),
gemm1(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size1,
1,
linear_scaling,
kInternalTranspose ? ptr_E : ptr_D,
kInternalTranspose ? ptr_D : ptr_E,
ref_Variance_.data(),
ref_Mean_.data(),
ref_Gamma_.data(),
ref_Beta_.data(),
ptr_O,
ptr_O,
problem_size1.m() * problem_size1.k(),
problem_size1.n() * problem_size1.k(),
problem_size1.n(),
problem_size1.n(),
problem_size1.k(),
problem_size1.k(),
problem_size1.m() * problem_size1.n(),
problem_size1.m() * problem_size1.n(),
kInternalTranspose ? ldm_E : ldm_D,
      kInternalTranspose ? ldm_D : ldm_E,
ref_Variance_.layout().stride(0),
ref_Mean_.layout().stride(0),
ref_Gamma_.layout().stride(0),
ref_Beta_.layout().stride(0),
ldm_O,
ldm_O
),
extend(problem_size0)
{
}
};
struct Params {
typename GemmEpilogueFusion::Params gemm0;
typename ApplyFinalReductionKernel::Params reduction;
MatrixCoord extend;
//
// Methods
//
Params() { }
Params(Arguments const &args):
gemm0(args.gemm0),
reduction(args.reduction),
extend(MatrixCoord(args.extend.m(), args.extend.n()))
{
}
};
public:
// Gemm
//
// Methods
//
private:
Params params_;
GemmMainloopFusion gemm_fusion_op;
public:
/// Ctor
GemmLayernorm() {
}
/// Initialize
Status initialize(Arguments const &args) {
params_ = Params(args);
cutlass::Status status;
size_t workspace_size = gemm_fusion_op.get_workspace_size(args.gemm1);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = gemm_fusion_op.can_implement(args.gemm1);
CUTLASS_CHECK(status);
status = gemm_fusion_op.initialize(args.gemm1, workspace.get());
CUTLASS_CHECK(status);
return cutlass::Status::kSuccess;
}
/// Run
Status run(cudaStream_t stream) {
//
// Launch the GEMM + layernorm kernel
//
dim3 gemm_grid = SwizzleThreadBlock().get_grid_shape(params_.gemm0.grid_tiled_shape);
dim3 gemm_block(GemmEpilogueFusion::kThreadCount, 1, 1);
int gemm_smem_size = int(sizeof(typename GemmEpilogueFusion::SharedStorage));
cutlass::Kernel<GemmEpilogueFusion><<<gemm_grid, gemm_block, gemm_smem_size, stream>>>(params_.gemm0);
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the ApplyFinalReductionKernel
//
// always performs reduction from leading dimension
int leading_dim_0 = kInternalTranspose ? params_.extend.row() : params_.extend.column();
int leading_dim_1 = kInternalTranspose ? params_.extend.column() : params_.extend.row();
int thread_per_block = 128;
int block_per_row = (leading_dim_1 + thread_per_block - 1) / thread_per_block;
if (block_per_row < 4) {
thread_per_block = 32;
block_per_row = (leading_dim_1 + thread_per_block - 1) / thread_per_block;
}
dim3 final_reduction_block(thread_per_block);
dim3 final_reduction_grid(block_per_row);
Kernel<ApplyFinalReductionKernel><<<
final_reduction_grid, final_reduction_block, sizeof(typename ApplyFinalReductionKernel::SharedStorage), stream
>>>(params_.reduction);
result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the GEMM + mainloop fusion kernel
//
cutlass::Status status = gemm_fusion_op();
CUTLASS_CHECK(status);
return cutlass::Status::kSuccess;
}
/// Function call operator
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/37_gemm_layernorm_gemm_fusion/gemm_with_layernorm.h/0 | {
"file_path": "examples/37_gemm_layernorm_gemm_fusion/gemm_with_layernorm.h",
"repo_id": "examples",
"token_count": 12997
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  This example shows how to run group convolution kernels, built from functions and data structures
  provided by CUTLASS and using Tensor Cores, on an NVIDIA Ampere GPU.
  There are 2 group conv modes:
  1. cutlass::conv::GroupMode::kSingleGroup
    This mode is for large K problem sizes: k_per_group (K/groups) is equal to or larger than
    threadblock_tile_N. One or more threadblocks compute the data of one group.
  2. cutlass::conv::GroupMode::kMultipleGroup
    This mode is for small K problem sizes: k_per_group (K/groups) is smaller than threadblock_tile_N.
    One threadblock will compute data from more than one group.
  The function profile_convolution_selecter() shows how to choose the kernel with the proper group mode
  according to the problem size and the threadblock_tile size.
*/
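//
// A concrete instance of the selection rule used in profile_convolution_selecter() below, with the
// default ThreadblockShape of 64x64x64 (threadblock_tile_N = 64):
//
//   --k=128 --g=8  ->  k_per_group = 128 / 8 = 16  <  64  ->  kMultipleGroup kernel is selected
//   --k=128 --g=2  ->  k_per_group = 128 / 2 = 64  >= 64  ->  kSingleGroup kernel is selected
//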
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes the epilogue part of the kernel; we use the default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
// Analytic kernel and operation for single group problem size
using AnalyticSingleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kSingleGroup,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using AnalyticSingleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<AnalyticSingleGroupKernel>;
// Analytic kernel and operation for multiple group problem size
using AnalyticMultipleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kMultipleGroup,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using AnalyticMultipleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<AnalyticMultipleGroupKernel>;
// Optimized kernel and operation for single group problem size
using OptimizedSingleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kSingleGroup,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using OptimizedSingleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<OptimizedSingleGroupKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
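  // padding stores {pad_h, pad_h, pad_w, pad_w}: update()/parse() fill both halves, and
  // output_size() adds the two H pads (n() + h()) and the two W pads (w() + c())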
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int groups;
bool reference_check;
bool measure_performance;
int iterations;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool optimized;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
groups(1),
reference_check(false),
measure_performance(false),
iterations(20),
alpha(1),
beta(0),
optimized(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("optimized")) {
optimized = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
cmd.get_cmd_line_argument("g", groups);
filter_size.c() = input_size.c() / groups;
cmd.get_cmd_line_argument("u", conv_stride.row());
cmd.get_cmd_line_argument("v", conv_stride.column());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "42_ampere_tensorop_group_conv example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward grouped convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --g=<int> Conv groups G\n\n"
<< " --u=<int> Conv stride_h\n\n"
<< " --v=<int> Conv stride_w\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --optimized If set (true), use optimized kernel, otherwise use analytic kernel.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=8 --ref-check\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=2 --ref-check\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=2 --ref-check --optimized\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,G,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.groups << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
template <typename Conv2dOperation>
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices,
options.groups
);
// Construct Conv2dOperation::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename Conv2dOperation::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
Conv2dOperation implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute with reference implementation
cutlass::reference::device::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
} else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
} else {
result.reference_check = cutlass::Status::kInvalid;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Result profile_convolution_selecter(Options const &options) {
int k_per_group = options.filter_size.n() / options.groups;
// In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups
if (k_per_group < ThreadblockShape::kN) { // MultipleGroup mode
if (options.optimized) {
std::cerr << "Invalid problem: optimized group conv kernel doesn't support MultipleGroup (one CTA calculate multiple groups) mode" << std::endl;
exit(-1);
} else {
std::cout << "Select AnalyticMultipleGroupOperation\n";
return profile_convolution<AnalyticMultipleGroupOperation>(options);
}
} else { // SingleGroup mode
if (options.optimized) {
std::cout << "Select OptimizedSingleGroupOperation\n";
return profile_convolution<OptimizedSingleGroupOperation>(options);
} else {
std::cout << "Select AnalyticSingleGroupOperation\n";
return profile_convolution<AnalyticSingleGroupOperation>(options);
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution_selecter(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/42_ampere_tensorop_group_conv/ampere_tensorop_group_conv.cu/0 | {
"file_path": "examples/42_ampere_tensorop_group_conv/ampere_tensorop_group_conv.cu",
"repo_id": "examples",
"token_count": 8701
} | 6 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
import gen_threadblock as gen_tb
class gen_default_Gemm:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_B2bMma(self, specialized_template_args):
code = "using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<\n"
code += specialized_template_args
code += ">::ThreadblockB2bMma;\n"
# print(code)
return code
def gen_epilogue(self):
epilogue_code = ""
epilogue_code += helper.var_idx("static const int kPartitionsK", self.b2b_num - 1) + helper.var_idx(" = ThreadblockShape", self.b2b_num - 1) + helper.var_idx("::kK / WarpShape", self.b2b_num - 1) + "::kK;\n"
epilogue_code += "using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<\n"
epilogue_code += " " + helper.var_idx("ThreadblockShape", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("typename B2bMma::Operator", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("kPartitionsK", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + "::kCount\n"
epilogue_code += ">::Epilogue;\n"
epilogue_code += "using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle, SplitKSerial>;\n\n"
return epilogue_code
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/layout/matrix.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/epilogue.h\"
#include \"{cutlass_dir}cutlass/epilogue/thread/linear_combination.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/kernel/gemm_pipelined.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_simt.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_simt.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"../kernel/b2b_gemm.h\"
#include \"../threadblock/default_b2b_mma.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
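        # Emits the primary DefaultB2bGemm template plus a partial specialization restricted (via
        # filter_list) to 2 stages, arch::OpClassTensorOp, arch::Sm75 and row-major C layouts; the
        # specialization's body wires up the B2bMma type, the epilogue and the B2bGemmKernel.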
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, self.template_param,"", speicalized = None, set_default=False)
filter_list = []
filter_list.append(('Stages', 2))
filter_list.append(("OperatorClass", "arch::OpClassTensorOp"))
filter_list.append(("ArchTag", "arch::Sm75"))
for i in range(self.b2b_num):
filter_list.append((helper.var_idx("LayoutC", i), "layout::RowMajor"))
rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, filter_list, keep_= True)
B2bMma_code = self.gen_B2bMma(speicalized_template_args)
epilogue_and_rest_code = self.gen_epilogue()
gen_special_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, rtn_template_args, B2bMma_code + epilogue_and_rest_code, speicalized = speicalized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", gen_code + gen_special_code)))
return self.gen_include_header() + code
class gen_Kernel:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2bnum = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"\n'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_Params(self):
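        # Emits the data members of the generated kernel's Params struct: per-GEMM problem sizes,
        # the tiled grid shape, iterator params / tensor refs for A0, every B_i and C_i, the final
        # D output, the per-stage output-op params, the batch count and gemm_k_iterations_0.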
gen_param = ""
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + ";\n"
gen_param += " " + "cutlass::gemm::GemmCoord grid_tiled_shape;\n"
gen_param += " " + "typename B2bMma::IteratorA0::Params params_A0;\n"
gen_param += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0;\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::Params params_B", i) + ";\n"
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ";\n"
if i == self.b2bnum - 1:
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ";\n"
else:
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_D", self.b2bnum - 1) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ";\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + ";\n"
gen_param += " " + 'int batch_count' + ";\n"
gen_param += " " + 'int gemm_k_iterations_0' + ";\n"
return gen_param
def gen_Memberfunc(self):
code_default = "\nCUTLASS_HOST_DEVICE\n"
code_default += "Params()"
code_default += " { } \n\n"
code_construct = "\nCUTLASS_HOST_DEVICE\n"
code_construct += "Params(\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("cutlass::gemm::GemmCoord const & problem_size_", i) + ",\n"
code_construct += " " + "cutlass::gemm::GemmCoord const & grid_tiled_shape,\n"
code_construct += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0,\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ",\n"
if i == self.b2bnum - 1:
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ",\n"
else:
code_construct += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ",\n"
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ",\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + helper.var_idx(" = typename OutputOp", i) + "::Params(),\n"
code_construct += " " + "int batch_count = 1\n"
code_construct += "):\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("problem_size_", i) + helper.var_idx("(problem_size_", i) + "),\n"
code_construct += " " + "grid_tiled_shape(grid_tiled_shape),\n"
code_construct += " " + "params_A0(ref_A0.layout()),\n"
code_construct += " " + "ref_A0(ref_A0),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("params_B", i) + helper.var_idx("(ref_B", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_B", i) + helper.var_idx("(ref_B", i) + "),\n"
code_construct += " " + helper.var_idx("params_C", i) + helper.var_idx("(ref_C", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_C", i) + helper.var_idx("(ref_C", i) + "),\n"
code_construct += " " + helper.var_idx("params_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + "),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("output_op_", i) + helper.var_idx("(output_op_", i) + "), \n"
code_construct += " " + "batch_count(batch_count) {\n"
code_construct += " " + helper.var_idx("gemm_k_iterations_", 0) + helper.var_idx(" = (problem_size_", 0) + helper.var_idx(".k() + B2bMma::Shape", 0) + helper.var_idx("::kK - 1) / B2bMma::Shape", 0) + "::kK;\n"
code_construct += "}\n"
return code_default + code_construct
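# For reference, with b2b_num == 1 the two helpers above emit a Params struct whose
# members look roughly like the following excerpt (illustrative only, reconstructed
# from the string templates above):
#
#   cutlass::gemm::GemmCoord problem_size_0;
#   cutlass::gemm::GemmCoord grid_tiled_shape;
#   typename B2bMma::IteratorA0::Params params_A0;
#   typename B2bMma::IteratorA0::TensorRef ref_A0;
#   typename B2bMma::IteratorB0::Params params_B0;
#   typename B2bMma::IteratorB0::TensorRef ref_B0;
#   typename Epilogue::OutputTileIterator::Params params_C0;
#   typename Epilogue::OutputTileIterator::TensorRef ref_C0;
#   typename Epilogue::OutputTileIterator::Params params_D0;
#   typename Epilogue::OutputTileIterator::TensorRef ref_D0;
#   typename OutputOp0::Params output_op_0;
#   int batch_count;
#   int gemm_k_iterations_0;
#
# plus the default constructor and the member-initializing constructor assembled in
# gen_Memberfunc().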
def gen_using(self):
code_using = ""
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using OutputOp", i) + helper.var_idx(" = typename B2bMma::OutputOp", i) + ";\n"
code_using += " " + helper.var_idx("using OutputOp", self.b2bnum - 1) + " = typename Epilogue::OutputOp;\n"
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using FusedAddBiasEpilogue", i) + helper.var_idx(" = typename B2bMma::FusedAddBiasEpilogue", i) +";\n"
code_using += " " + "using WarpCount0 = typename B2bMma::WarpCount0;\n"
code_using += " " + "static int const kThreadCount = 32 * WarpCount0::kCount;\n"
code_using += gen_ir.gen_struct("Params", self.gen_Params() + self.gen_Memberfunc())
code_using += "union SharedStorage {\n"
code_using += " " + "typename B2bMma::B2bMmaSharedStorage main_loop;\n"
code_using += " " + "typename Epilogue::SharedStorage epilogue;\n"
code_using += "};\n"
return code_using
def gen_can_implement(self):
gen_code = ""
return gen_code
def gen_operator_and_constr(self):
ctr_code = "CUTLASS_HOST_DEVICE\n"
ctr_code += self.gen_class_name + "() { } \n\n"
operator_code = "CUTLASS_DEVICE\n"
operator_code += "void operator()(Params const ¶ms, SharedStorage &shared_storage) {\n"
operator_code += " " + "ThreadblockSwizzle threadblock_swizzle;\n"
operator_code += " " + "cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + "int batch_idx = threadblock_tile_offset.k();\n"
operator_code += " " + "if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||\n"
operator_code += " " + "params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {\n"
operator_code += " " + " " + "return;\n"
operator_code += " " + "}\n"
operator_code += " " + "cutlass::MatrixCoord tb_offset_A0{\n"
operator_code += " " + " " + "threadblock_tile_offset.m() * B2bMma::Shape0::kM,\n"
operator_code += " " + " " + "0\n"
operator_code += " " + "};\n"
for i in range(self.b2bnum):
operator_code += " " + helper.var_idx("cutlass::MatrixCoord tb_offset_B", i) + "{\n"
operator_code += " " + " " + "0,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", i) + "::kN\n"
operator_code += " " + "};\n"
operator_code += " " + "int thread_idx = threadIdx.x;\n\n"
operator_code += " " + "MatrixCoord threadblock_offset(\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.m() * B2bMma::Shape", self.b2bnum - 1) + "::kM,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", self.b2bnum - 1) + "::kN\n"
operator_code += " " + ");\n"
operator_code += " " + "typename B2bMma::IteratorA0 iterator_A0(\n"
operator_code += " " + " " + "params.params_A0,\n"
operator_code += " " + " " + "params.ref_A0.data(),\n"
operator_code += " " + " " + "params.problem_size_0.mk(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "tb_offset_A0);\n"
operator_code += " " + "iterator_A0.add_pointer_offset(batch_idx * params.problem_size_0.m() * params.problem_size_0.k());\n\n"
for i in range (self.b2bnum):
operator_code += " " + helper.var_idx("typename B2bMma::IteratorB", i ) + helper.var_idx(" iterator_B", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_B", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_B", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", i) + ".kn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + helper.var_idx("tb_offset_B", i) + ");\n"
operator_code += " " + helper.var_idx("iterator_B", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * params.problem_size_", i) + ".k());\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("typename FusedAddBiasEpilogue", i ) + helper.var_idx("::OutputTileIterator iterator_C", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_" , i) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset" + ");\n"
operator_code += " " + helper.var_idx("int ref_C", i) + helper.var_idx("_stride = params.ref_C", i) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * (ref_C", i) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", i) + ".m()));\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("FusedAddBiasEpilogue", i ) + helper.var_idx(" epilogue_", i ) + ";\n"
operator_code += " " + "int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);\n"
operator_code += " " + "int lane_idx = threadIdx.x % 32;\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("OutputOp", i) + helper.var_idx(" output_op_", i) + helper.var_idx("(params.output_op_", i) + ");\n"
operator_code += " " + "B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);\n"
operator_code += " " + "typename B2bMma::FragmentC0 src_accum;\n"
operator_code += " " + helper.var_idx("typename B2bMma::FragmentC", self.b2bnum - 1)+ " accumulators;\n"
operator_code += " " + "src_accum.clear();\n"
operator_code += " " + "accumulators.clear();\n"
operator_code += " " + "b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, "
for i in range(self.b2bnum):
operator_code += helper.var_idx("iterator_B", i) + ", "
operator_code += "src_accum"
if self.b2bnum != 1:
operator_code += ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("output_op_", i) + ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("epilogue_", i) + ", "
for i in range(self.b2bnum - 1):
final = ", "
if i == self.b2bnum - 2:
final =""
operator_code += helper.var_idx("iterator_C", i) + final
operator_code += ");\n"
operator_code += " " + helper.var_idx("OutputOp", self.b2bnum - 1) + helper.var_idx(" output_op_", self.b2bnum - 1) + helper.var_idx("(params.output_op_", self.b2bnum - 1) + ");\n"
operator_code += " " + "threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_C", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("int ref_C", self.b2bnum - 1) + helper.var_idx("_stride = params.ref_C", self.b2bnum - 1) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * (ref_C", self.b2bnum - 1) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", self.b2bnum - 1) + ".m()));\n\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_D", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_D", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_D", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("iterator_D", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * params.problem_size_", self.b2bnum - 1) + ".m());\n\n"
operator_code += " " + "Epilogue epilogue(\n"
operator_code += " " + " " + "shared_storage.epilogue,\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "warp_idx,\n"
operator_code += " " + " " + "lane_idx\n"
operator_code += " " + ");\n"
operator_code += " " + "epilogue("
operator_code += helper.var_idx("output_op_", self.b2bnum - 1) + ", "
operator_code += helper.var_idx("iterator_D", self.b2bnum - 1) + ", "
operator_code += "accumulators, "
operator_code += helper.var_idx("iterator_C", self.b2bnum - 1) + ");\n"
operator_code += "}\n"
return ctr_code + operator_code
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"
#include \"{cutlass_dir}cutlass/semaphore.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
template_param = []
template_param.append(("typename", "B2bMma"))
template_param.append(("typename", "Epilogue"))
template_param.append(("typename", "ThreadblockSwizzle"))
template_param.append((bool, "SplitKSerial"))
code_body = ""
code_body += self.gen_using()
code_body += self.gen_operator_and_constr()
struct_code = gen_ir.gen_template_struct(self.gen_class_name, template_param, code_body)
code = self.gen_include_header()
code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", struct_code)))
return self.gen_include_header() + code
class gen_kernel:
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.template_param = template_param
self.gen_class_name = "B2bGemm"
self.gen_kernel_name = gen_class_name + "Kernel"
self.template_args = []
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_default_b2b_gemm = gen_default_Gemm(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_Kerenl = gen_Kernel(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
# Include gen_threadBlock
self.gen_threadBlock = gen_tb.gen_threadblock(template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root)
self.file_dir = output_dir + "/kernel/"
def gen_code(self, first_use_1stage):
default_b2b_gemm = self.gen_default_b2b_gemm.gen_code()
print("[INFO]: Gen kernel code [default_b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_gemm.h", "w+") as f:
f.write(default_b2b_gemm)
kernel = self.gen_Kerenl.gen_code()
print("[INFO]: Gen kernel code [b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_gemm.h", "w+") as f:
f.write(kernel)
# Call code to gen threadblock
self.gen_threadBlock.gen_code(first_use_1stage)
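# Hypothetical driver sketch (parameter values are invented for illustration; the
# real entry point that wires this generator up lives elsewhere):
#
#   kernel_gen = gen_kernel(template_param, "B2bGemm", b2b_num=2,
#                           output_dir="./generated",
#                           cutlass_deps_root="cutlass/", project_root="./")
#   kernel_gen.gen_code(first_use_1stage=False)
#
# This writes kernel/default_b2b_gemm.h and kernel/b2b_gemm.h under output_dir and
# then delegates to gen_threadblock for the threadblock-level headers.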
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py",
"repo_id": "examples",
"token_count": 11319
} | 7 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a GETT targeting Hopper tensor cores using the CUTLASS 3.x API.
CUTLASS has long provided implementations of Generalized Matrix times Matrix (GEMM) kernels.
However, a plethora of workloads compute on higher ranked tensors. Products of such tensors,
called tensor contractions, can be executed as multiple batched GEMMs, however, they can be
further accelerated with kernels that natively operate on these higher ranked tensors to
perform Generalized Tensor times Tensor contractions (GETT). CuTe's hierarchical layouts
and CUTLASS 3.0's unified micro-kernels make implementation of GETTs trivial. In this example,
we show how CUTLASS 3.0, CuTe, and Hopper's TMA feature together can accelerate GETTs while
making the process of authoring custom GETT kernels easier than ever before.
The modes of a tensor that participate in a GETT can be fundamentally grouped into four
semantic categories. The contraction modes (or K-modes) only appear in the A and B (left and right)
inputs but not in the C output tensor. Row modes (or M-modes) only appear in the left
input tensor (A) and the output tensor (C). Column modes (or N-modes) only appear in the
right (B) input tensor and the output tensor (C). Batch modes (or L-modes) appear in all
input and output tensors. If we fold the many modes of a tensor contraction into these four
categories, it would allow us to represent the input and output tensors as rank-3 "matrices"
that can be computed upon as if we were computing a batched GEMM!
This is exactly what CuTe's hierarchical layout representation allows us to do! Instead of having
simple integers as strides for these four modes, we can have nested strides for each of these
semantic categories that themselves have multiple modes within them -- multi-mode strides!
In CUTLASS 3.0, all one has to do to take advantage of this capability is to substitute the
required multi-mode strides instead of the default ones provided by gemm::detail::TagToStrideX.
In the following example, we illustrate how every Hopper GEMM in CUTLASS 3.0 is a GETT in disguise.
We begin by defining the four modes detailed above as Row, Col (column), Red (reduction), and
Bat (batch) strides, which we then nest for each of the in/out tensors to create our rank-3 stride
tuples. Note that although we do not define the problem shape type explicitly, it too remains a
rank-4 shape tuple just like any other batched GEMM, but instead with multi-mode shapes for each
of the four corresponding multi-modes within it. After this, the same CollectiveMma and
CollectiveBuilder we describe in examples 50 and 49 are used to create our kernel type. Nothing
else changes from a user's point of view. Note that multi-mode strides do not affect our
specializations in any way -- the lexical spelling of our kernels remains the same. The
only difference between a CUTLASS 3 batched GEMM and a GETT is the instantiated CuTe Layouts.
CollectiveBuilders rely on detecting the static-1 in the stride tuples to determine the major mode,
which is what the example demonstrates. However, it is possible to have all modes be dynamic as well
if the user assembles a CollectiveMma manually and ensures that the runtime strides are compatible
with the static micro-kernel of the collective (TiledMma, TiledCopy, and smem layouts). On the other
hand, a user can have more than one static stride too (which need not correspond to the major mode).
In particular, this example demonstrates a GETT where the 0th M-mode (M0) in A and the 0th K-mode (K0)
in B are major. All other combinations of major modes are supported, with the exception of mixed
K-major scenarios where both A and B are K-major (e.g. K0 is major in A but K1 is major in B).
NVIDIA Hopper architecture's TMA feature makes the predication required to implement these complicated
kernels trivial, as it is all handled by TMA itself without requiring any programmer effort.
Example executions, where the stride order defines the major-order (major on the left):
51_hopper_gett --modeC=m,n,l --modeA=m,k,l --modeB=k,n,l --extents=m:4096,n:4096,k:4096
51_hopper_gett --modeC=l,m,n --modeA=m,l,k --modeB=k,n,l --extents=m:128,n:128,k:128,l:64
51_hopper_gett --modeC=m,a,b,p,q,n,l --modeA=m,l,b,k,a --modeB=k,n,p,q,l --extents=m:32,a:32,b:3,n:128,k:128,l:4,p:3,q:3
*/
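//
// A minimal illustration (not part of this example's command-line flow) of the mode
// folding described above: a contraction C[m0,m1,n0,l0] = A[m0,m1,k0,k1,l0] * B[k0,k1,n0,l0]
// keeps the familiar rank-4 (M,N,K,L) problem shape, but each mode is itself a tuple.
// Extents here are made up for illustration:
//
//   auto M = cute::make_shape(32, 4);                  // (m0, m1)  row modes
//   auto N = cute::make_shape(128);                    // (n0)      column modes
//   auto K = cute::make_shape(64, 2);                  // (k0, k1)  contraction modes
//   auto L = cute::make_shape(3);                      // (l0)      batch modes
//   auto problem_shape = cute::make_shape(M, N, K, L); // rank-4, just like a batched GEMM
//
// Each tensor's stride likewise becomes a rank-3 tuple of per-category stride tuples,
// exactly as assembled in main() below.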
#include "gett_kernel.cuh"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/gett_commandline.hpp"
#include "cutlass/util/reference/device/gett.hpp"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/print_error.hpp"
namespace example {
// Returns true if the left-most value in the tuple is statically known to be 1
template<class Stride>
constexpr bool
is_left_major() {
// Account for stride types with and without batch mode and batch modes with static zero stride
return cute::is_constant<1, decltype(cute::size<0,0>(Stride{}))>::value;
}
// Same as cute::make_int_tuple but inserts a major stride (Int<1>) for the leftmost mode if required
template <int Rank, bool IsMajor, class Indexable>
static constexpr
auto
make_stride_tuple(Indexable const& t, int n, int64_t init_default = 0) {
static_assert(Rank > 1);
if constexpr (IsMajor) {
return cute::transform(cute::make_seq<Rank>{}, [&](auto i) {
if constexpr (i == 0) {
return cute::Int<1>{};
}
else {
return i < n ? t[i] : init_default;
}
});
}
else {
return cute::make_int_tuple<Rank>(t, n, init_default);
}
}
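// For instance, with Rank = 4, IsMajor = true, n = 3, t = {1, 128, 4096} and the
// default init_default of 0, the result is (_1, 128, 4096, 0): the leftmost stride
// is replaced by the static unit stride and missing trailing modes take init_default.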
} // namespace example
//////////////////////////////////////////////////////////////////////////////
int
main(int argc, char const* argv[]) {
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
if (argc != 5) {
std::cout << "Number of command line args must be 4.\n";
cutlass::GettCommandLine::print_usage();
return 0;
}
//
// Define the stride types for A, B, C, and D
//
// Stride for A (left input). If reduction mode is major, same must be major in B
// For this example, M0 is major in A.
using RowModeStridesA = cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>;
using RedModeStridesA = cute::Stride<int64_t, int64_t, int64_t>;
using BatModeStridesA = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Stride for B (right input). If reduction mode is major, same must be major in A
// For this example, K0 is major in B.
using ColModeStridesB = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using RedModeStridesB = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using BatModeStridesB = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Strides for output, which can all be dynamic.
using RowModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using ColModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using BatModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Assemble our rank-3 multi-mode strides for the in/out tensors
using StrideA = cute::Stride<RowModeStridesA, RedModeStridesA, BatModeStridesA>;
using StrideB = cute::Stride<ColModeStridesB, RedModeStridesB, BatModeStridesB>;
using StrideC = cute::Stride<RowModeStridesC, ColModeStridesC, BatModeStridesC>;
// Note: C and D share strides here for simplicity.
// In general, they need not have the same layout.
using StrideD = StrideC;
//
// Define element types for tensors and intermediate values
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementD = float;
using ElementAccumulator = float;
using ElementEpilogue = float;
// The following constexpr values set the max number of modes in each MNKL mode
constexpr int MaxRank_M = cute::rank(RowModeStridesA{}); // Max row modes
constexpr int MaxRank_N = cute::rank(ColModeStridesB{}); // Max column modes
constexpr int MaxRank_K = cute::rank(RedModeStridesA{}); // Max contraction modes
constexpr int MaxRank_L = cute::rank(BatModeStridesA{}); // Max batch modes
static_assert(cute::rank(RowModeStridesA{}) == cute::rank(RowModeStridesC{}));
static_assert(cute::rank(ColModeStridesB{}) == cute::rank(ColModeStridesC{}));
static_assert(cute::rank(RedModeStridesA{}) == cute::rank(RedModeStridesB{}));
static_assert(cute::rank(BatModeStridesA{}) == cute::rank(BatModeStridesC{}));
static_assert(cute::rank(BatModeStridesB{}) == cute::rank(BatModeStridesC{}));
// Parse command line to get modes, extents, and strides
cutlass::GettCommandLine cmd;
auto parsed_args = cmd.parse(argc, argv, true);
auto& m = parsed_args.M;
auto& ldAm = parsed_args.ldAm;
auto& ldCm = parsed_args.ldCm;
int rank_m = int(m.size());
auto& n = parsed_args.N;
auto& ldBn = parsed_args.ldBn;
auto& ldCn = parsed_args.ldCn;
int rank_n = int(n.size());
auto& k = parsed_args.K;
auto& ldAk = parsed_args.ldAk;
auto& ldBk = parsed_args.ldBk;
int rank_k = int(k.size());
auto& l = parsed_args.L;
auto& ldAl = parsed_args.ldAl;
auto& ldBl = parsed_args.ldBl;
auto& ldCl = parsed_args.ldCl;
int rank_l = int(l.size());
if ((rank_m > MaxRank_M) || (rank_n > MaxRank_N) || (rank_k > MaxRank_K) || (rank_l > MaxRank_L)) {
std::cerr << "ERROR: Input has more modes than statically configured.";
return 1;
}
// Check that the user input major stride match the static major strides.
if (example::is_left_major<RowModeStridesA>() && (ldAm[0] != 1)) {
std::cerr << "ERROR: A_M0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<RedModeStridesA>() && (ldAk[0] != 1)) {
std::cerr << "ERROR: A_K0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<ColModeStridesB>() && (ldBn[0] != 1)) {
std::cerr << "ERROR: B_N0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<RedModeStridesB>() && (ldBk[0] != 1)) {
std::cerr << "ERROR: B_K0 is expected to be major, but was not in the provided input!\n";
return 1;
}
// Convert to `cute::Tuple`s and set up arguments
auto M = make_int_tuple<MaxRank_M>(m.data(), rank_m, 1);
auto dAm = example::make_stride_tuple<MaxRank_M, example::is_left_major<RowModeStridesA>()>(ldAm.data(), rank_m);
auto dCm = example::make_stride_tuple<MaxRank_M, example::is_left_major<RowModeStridesC>()>(ldCm.data(), rank_m);
auto N = make_int_tuple<MaxRank_N>(n.data(), rank_n, 1);
auto dBn = example::make_stride_tuple<MaxRank_N, example::is_left_major<ColModeStridesB>()>(ldBn.data(), rank_n);
auto dCn = example::make_stride_tuple<MaxRank_N, example::is_left_major<ColModeStridesC>()>(ldCn.data(), rank_n);
auto K = make_int_tuple<MaxRank_K>(k.data(), rank_k, 1);
auto dAk = example::make_stride_tuple<MaxRank_K, example::is_left_major<RedModeStridesA>()>(ldAk.data(), rank_k);
auto dBk = example::make_stride_tuple<MaxRank_K, example::is_left_major<RedModeStridesB>()>(ldBk.data(), rank_k);
auto L = make_int_tuple<MaxRank_L>(l.data(), rank_l, 1);
auto dAl = make_int_tuple<MaxRank_L>(ldAl.data(), rank_l, 0);
auto dBl = make_int_tuple<MaxRank_L>(ldBl.data(), rank_l, 0);
auto dCl = make_int_tuple<MaxRank_L>(ldCl.data(), rank_l, 0);
// Concat tuples to turn it into rank-4 problem shape and rank-3 strides, just like GEMM
auto problem_shape = make_shape(M, N, K, L);
StrideA stride_A = make_stride(dAm, dAk, dAl);
StrideB stride_B = make_stride(dBn, dBk, dBl);
StrideC stride_C = make_stride(dCm, dCn, dCl);
StrideD stride_D = stride_C;
auto alpha = ElementEpilogue(1.0f);
auto beta = ElementEpilogue(1.0f);
//
// Allocate and init tensors
//
auto M_size = std::accumulate(std::begin(m), std::end(m), 1, std::multiplies<>{});
auto N_size = std::accumulate(std::begin(n), std::end(n), 1, std::multiplies<>{});
auto K_size = std::accumulate(std::begin(k), std::end(k), 1, std::multiplies<>{});
auto L_size = std::accumulate(std::begin(l), std::end(l), 1, std::multiplies<>{});
thrust::host_vector<ElementA> h_A(M_size * K_size * L_size);
thrust::host_vector<ElementB> h_B(N_size * K_size * L_size);
thrust::host_vector<ElementC> h_C(M_size * N_size * L_size);
thrust::host_vector<ElementD> h_D(M_size * N_size * L_size);
// Note: the cast to int here is to avoid false-negative ref-checks which can
// occur due to floating point arithmetic not being purely associative.
for (auto& a : h_A) a = ElementA(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& b : h_B) b = ElementB(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& c : h_C) c = ElementC(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& d : h_D) d = ElementD(-1);
thrust::device_vector<ElementA> d_A = h_A;
thrust::device_vector<ElementB> d_B = h_B;
thrust::device_vector<ElementC> d_C = h_C;
thrust::device_vector<ElementD> cutlass_result = h_D;
thrust::device_vector<ElementD> reference_result = h_D;
//
// Compute GETT
//
auto status = example::gett_kernel(
problem_shape,
d_A.data().get(), stride_A,
d_B.data().get(), stride_B,
ElementAccumulator{},
d_C.data().get(), stride_C,
cutlass_result.data().get(), stride_D,
alpha, beta);
if (cutlass::Status::kSuccess != status) {
std::cerr << "ERROR: GETT operator launch failed.\n";
return 1;
}
auto cuda_err = cudaDeviceSynchronize();
if (cudaSuccess != cuda_err) {
std::cerr << "ERROR: GETT operator execution failed. with error :";
std::cerr << cudaGetErrorString(cuda_err) << "\n";
return 1;
}
//
// Verify
//
cutlass::reference::device::gett(
problem_shape,
d_A.data().get(), stride_A,
d_B.data().get(), stride_B,
ElementAccumulator{},
d_C.data().get(), stride_C,
reference_result.data().get(), stride_D,
alpha, beta);
cuda_err = cudaDeviceSynchronize();
if (cudaSuccess != cuda_err) {
std::cerr << "ERROR: GETT reference execution failed. with error :";
std::cerr << cudaGetErrorString(cuda_err) << "\n";
return 1;
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::device::BlockCompareEqual(
reference_result.data().get(), cutlass_result.data().get(), cutlass_result.size());
if (passed) {
std::cout << "GETT verification passed.\n";
return 0;
}
else {
std::cerr << "ERROR: GETT verification failed! Printing detailed stats.\n";
h_D = reference_result;
thrust::host_vector<ElementD> h_cutlass_result = cutlass_result;
print_relative_error(h_cutlass_result.size(), h_cutlass_result.data(), h_D.data());
std::cout << "StrideA: "; print(stride_A); std::cout << '\n';
std::cout << "StrideB: "; print(stride_B); std::cout << '\n';
std::cout << "StrideC: "; print(stride_C); std::cout << '\n';
std::cout << "StrideD: "; print(stride_D); std::cout << '\n';
return 1;
}
#else
std::cerr << "Unsupported example. Please ensure CUTLASS_ARCH_MMA_SM90_SUPPORTED is defined.\n";
return 0;
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
}
| examples/51_hopper_gett/51_hopper_gett.cu/0 | {
"file_path": "examples/51_hopper_gett/51_hopper_gett.cu",
"repo_id": "examples",
"token_count": 6010
} | 8 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/util/print.hpp"
namespace example {
using namespace cute;
// Empty type used to disable gather/scatter for a GEMM argument
struct NoGather
{
template<class... Ts>
NoGather(Ts...) {};
};
/// Function object that applies an index to its argument
template <class Index>
struct IndexedGather
{
CUTE_HOST_DEVICE constexpr
IndexedGather(Index const *indices = {}): indices_(indices) {}
template <typename I>
CUTE_HOST_DEVICE constexpr
Index
operator()(I i) const { return indices_[i]; }
CUTE_HOST_DEVICE friend
void
print(IndexedGather const &s) {
cute::print("Indexed");
}
Index const *indices_;
};
/// Function object that applies a stride to its argument
/// Example: StridedGather<_2> gathers every other row/column
template <class Stride>
struct StridedGather
{
CUTE_HOST_DEVICE constexpr
StridedGather(Stride stride = {}): stride_(stride) {}
template <class I>
CUTE_HOST_DEVICE constexpr
auto
operator()(I i) const { return i * stride_; }
CUTE_HOST_DEVICE friend
void
print(StridedGather const &s) {
cute::print("Strided{");
print(s.stride_);
cute::print("}");
}
Stride stride_;
};
/// Custom stride object that applies a function followed by a stride
template <class Func, class Stride>
struct CustomStride
{
CUTE_HOST_DEVICE constexpr
CustomStride(Func const &func, Stride const &stride): func_(func), stride_(stride) {}
template <class I>
CUTE_HOST_DEVICE constexpr friend
auto
operator*(I i, CustomStride const &s) { return s.func_(i) * s.stride_; }
template <class I>
CUTE_HOST_DEVICE constexpr friend
auto
operator*(CustomStride const &s, I i) { return s.func_(i) * s.stride_; }
CUTE_HOST_DEVICE friend
void
print(CustomStride const & s) {
cute::print("Custom{");
print(s.func_);
cute::print(",");
print(s.stride_);
cute::print("}");
}
template<class Div>
CUTE_HOST_DEVICE constexpr friend
auto
safe_div(CustomStride const &s, Div const &div)
{
return CustomStride<Func, decltype(safe_div(s.stride_, div))>(s.func_, safe_div(s.stride_, div));
}
// Circumvent the requirement on make_layout that shape and stride are integral
template <class Shape>
CUTE_HOST_DEVICE constexpr friend
auto
make_layout(Shape const &shape, CustomStride const &stride)
{
return Layout<Shape, CustomStride>(shape, stride);
}
Func func_;
Stride stride_;
};
template<class Stride, class Func>
CUTLASS_HOST_DEVICE
auto
make_custom_stride_layout(Stride const &stride, Func&& func)
{
// Use a dummy shape and replace the first non-unit stride with a custom gather stride
auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
return make_layout(repeat_like(stride, _1{}),
replace<I>(stride, CustomStride{static_cast<Func&&>(func), get<I>(stride)}));
}
/// Helper function to optionally create a gather tensor
template<class Iterator, class Shape, class Stride, class Func>
CUTLASS_HOST_DEVICE
auto
make_gather_tensor(Iterator iter, Shape const &shape, Stride const &stride, Func &&func)
{
if constexpr (not cutlass::platform::is_same<remove_cvref_t<Func>, NoGather>::value) {
Layout matrix_layout = make_identity_layout(shape);
auto offset = as_arithmetic_tuple(repeat_like(shape, _0{}));
Layout gather_layout = make_custom_stride_layout(stride, static_cast<Func&&>(func));
return make_tensor(iter, ComposedLayout{gather_layout, offset, matrix_layout});
} else {
return make_tensor(iter, shape, stride);
}
}
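// Rough usage sketch (assumes a device pointer `ptr`, a gather-index array `idx`,
// and a row-major-style stride of (lda, 1); all names are illustrative only):
//
//   int const* idx = ...;                      // row indices to gather
//   auto gA = make_gather_tensor(
//       ptr,
//       cute::make_shape(M, K),
//       cute::make_stride(lda, cute::_1{}),
//       IndexedGather<int>{idx});
//
// Element (i, k) of gA then reads ptr[idx[i] * lda + k]; passing NoGather{} as the
// last argument degenerates to a plain cute::make_tensor.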
} // namespace example
namespace cute
{
template<int N, int I, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) {
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return upcast<N,I>(s,d); });
} else if constexpr (is_scaled_basis<Stride>::value) {
if constexpr (Stride::mode() == I) {
return make_layout(shape_div(shape, Int<N>{}), shape_div(stride, Int<N>{}));
} else {
return make_layout(shape, stride);
}
} else {
return upcast<N>(shape, stride);
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class OuterShape, class OuterStride, class Offset, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(ComposedLayout<Layout<OuterShape,OuterStride>,Offset,Layout<Shape,Stride>> const& layout)
{
// Find index of the stride-1 mode - that is the only one that requires updating inner shape and offset
auto idx = find_if(layout.layout_a().stride(), [](auto x){ return is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
// Upcast the outer layout (works as expected)
auto outer = upcast<N>(layout.layout_a());
// Upcast the accumulated offset along stride-1 mode
auto offset = as_arithmetic_tuple(replace<I>(layout.offset(), upcast<N>(get<I>(layout.offset()))));
// Upcast the inner layout's shape along stride-1 mode
auto inner = upcast<N,I>(layout.layout_b().shape(), layout.layout_b().stride());
return composition(outer, offset, inner);
}
} // namespace cute
| examples/common/gather_tensor.hpp/0 | {
"file_path": "examples/common/gather_tensor.hpp",
"repo_id": "examples",
"token_count": 2342
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && (__CUDACC_VER_MAJOR__ >= 12))
# define CUTE_ARCH_STSM_SM90_ENABLED
# define CUTE_ARCH_TMA_SM90_ENABLED
#endif
#if defined(CUTE_ARCH_TMA_SM90_ENABLED) && \
((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 3)))
# define CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED
#endif
namespace cute
{
struct SM90_U32x1_STSM_N
{
using SRegisters = uint32_t[1];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src,
uint128_t & smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x1.m8n8.shared.b16 [%0], {%1};\n"
:: "r"(smem_int_ptr),
"r"(src));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U32x2_STSM_N
{
using SRegisters = uint32_t[2];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x2.m8n8.shared.b16 [%0], {%1, %2};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U32x4_STSM_N
{
using SRegisters = uint32_t[4];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1, uint32_t const& src2, uint32_t const& src3,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x4.m8n8.shared.b16 [%0], {%1, %2, %3, %4};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1), "r"(src2), "r"(src3));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x2_STSM_T
{
using SRegisters = uint32_t[1];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x1.trans.m8n8.shared.b16 [%0], {%1};\n"
:: "r"(smem_int_ptr),
"r"(src));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x4_STSM_T
{
using SRegisters = uint32_t[2];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x2.trans.m8n8.shared.b16 [%0], {%1, %2};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x8_STSM_T
{
using SRegisters = uint32_t[4];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1, uint32_t const& src2, uint32_t const& src3,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x4.trans.m8n8.shared.b16 [%0], {%1, %2, %3, %4};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1), "r"(src2), "r"(src3));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
//
// Legacy STSM interfaces that aren't very useful
//
template <class T>
CUTE_HOST_DEVICE
void
copy_stsm(T const* const rmem_ptr,
uint128_t* const smem_ptr)
{
uint32_t const* reg_ptr = reinterpret_cast<uint32_t const*>(rmem_ptr);
// if constexpr
if (sizeof(T) == 4) {
SM90_U32x1_STSM_N::copy(reg_ptr[0], smem_ptr[0]);
}
else if (sizeof(T) == 8) {
SM90_U32x2_STSM_N::copy(reg_ptr[0], reg_ptr[1], smem_ptr[0]);
}
else if (sizeof(T) == 16) {
SM90_U32x4_STSM_N::copy(reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3], smem_ptr[0]);
}
else {
static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported");
}
}
template <class T>
CUTE_HOST_DEVICE
void
copy_stsm_trans(T const* const rmem_ptr,
uint128_t* const smem_ptr)
{
uint32_t const* reg_ptr = reinterpret_cast<uint32_t const*>(rmem_ptr);
// if constexpr
if (sizeof(T) == 4) {
SM90_U16x2_STSM_T::copy(reg_ptr[0], smem_ptr[0]);
}
else if (sizeof(T) == 8) {
SM90_U16x4_STSM_T::copy(reg_ptr[0], reg_ptr[1], smem_ptr[0]);
}
else if (sizeof(T) == 16) {
SM90_U16x8_STSM_T::copy(reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3], smem_ptr[0]);
}
else {
static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported");
}
}
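//
// In practice these ops are reached through the cute copy-atom machinery rather than
// the helpers above. A rough sketch (layouts elided; assumes half_t accumulators
// already partitioned by a tiled MMA, so treat it as an outline, not a recipe):
//
//   auto stsm_atom  = Copy_Atom<SM90_U32x4_STSM_N, half_t>{};
//   auto tiled_stsm = make_tiled_copy_C(stsm_atom, tiled_mma);
//   auto thr_stsm   = tiled_stsm.get_thread_slice(thread_idx);
//   copy(tiled_stsm, thr_stsm.retile_S(acc_frag), thr_stsm.partition_D(sC));
//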
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cute/arch/copy_sm90_desc.hpp>
#include <cute/arch/copy_sm90_tma.hpp>
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cute/arch/copy_sm90.hpp/0 | {
"file_path": "include/cute/arch/copy_sm90.hpp",
"repo_id": "include",
"token_count": 3157
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/copy_sm80.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/layout.hpp>
namespace cute
{
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Construct a zfill variant with a given predicate value
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
with(bool pred) const {
return {pred};
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Construct a zfill variant with a given predicate value
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
with(bool pred) const {
return {pred};
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Predicate value that determines whether to load or zfill
bool pred = false;
// Overload copy_unpack for zfill variant to pass the predicate into the op
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");
Tensor rS = recast<S>(src);
Tensor rD = recast<D>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
"In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
"In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");
SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Predicate value that determines whether to load or zfill
bool pred = false;
// Overload copy_unpack for zfill variant to pass the predicate into the op
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");
Tensor rS = recast<S>(src);
Tensor rD = recast<D>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
"In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
"In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");
SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Element copy selector
template <class SrcTensor, class DstTensor>
CUTE_HOST_DEVICE constexpr
auto
select_elementwise_copy(SrcTensor const&, DstTensor const&)
{
using SrcType = typename SrcTensor::value_type;
using DstType = typename DstTensor::value_type;
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
if constexpr (is_gmem<SrcTensor>::value && is_smem<DstTensor>::value &&
sizeof(SrcType) == sizeof(DstType) &&
(sizeof(SrcType) == 4 || sizeof(SrcType) == 8 || sizeof(SrcType) == 16))
{
return SM80_CP_ASYNC_CACHEALWAYS<SrcType,DstType>{};
} else {
return UniversalCopy<SrcType,DstType>{};
}
CUTE_GCC_UNREACHABLE;
#else
return UniversalCopy<SrcType,DstType>{};
#endif
}
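// Rough usage sketch (tensor names are illustrative): pick an op for a gmem -> smem
// tile copy and, for the cp.async traits above, build the zfill variant so that
// out-of-bounds threads write zeros instead of loading:
//
//   auto op   = select_elementwise_copy(gmem_tile, smem_tile);
//   auto atom = Copy_Atom<decltype(op), float>{};
//   // copy(atom.with(thread_in_bounds), gmem_tile, smem_tile);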
} // end namespace cute
| include/cute/atom/copy_traits_sm80.hpp/0 | {
"file_path": "include/cute/atom/copy_traits_sm80.hpp",
"repo_id": "include",
"token_count": 2594
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector_types.h>
#include <cutlass/numeric_types.h>
#include <cutlass/numeric_size.h>
#include <cute/numeric/int.hpp>
#include <cute/numeric/real.hpp>
namespace cute {
template <typename T>
struct sizeof_bits : public cutlass::sizeof_bits<T> {};
// DO NOT change auto to int; sizeof_bits<sparse_elem> uses integral_ratio instead of int
template <class T>
static constexpr auto sizeof_bits_v = sizeof_bits<T>::value;
using cutlass::bits_to_bytes;
using cutlass::is_subbyte;
template <class T>
static constexpr auto is_subbyte_v = is_subbyte<T>::value;
using cutlass::half_t;
using cutlass::bfloat16_t;
using cutlass::tfloat32_t;
// Umbrella floating-point 8-bit data type : type_erased_dynamic_float8_t
// This umbrella datatype can be enabled when a user provides a specific
// datatype in runtime argument list.
using cutlass::type_erased_dynamic_float8_t;
using cutlass::float_e4m3_t;
using cutlass::float_e5m2_t;
using cutlass::uint1b_t;
using cutlass::int2b_t;
using cutlass::uint2b_t;
using cutlass::int4b_t;
using cutlass::uint4b_t;
using cutlass::bin1_t;
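// Illustrative sanity checks for the width helpers re-exported above; these hold for
// the cutlass definitions and are included purely as documentation.
static_assert(sizeof_bits_v<half_t> == 16, "half_t occupies 16 bits");
static_assert(sizeof_bits_v<uint1b_t> == 1, "uint1b_t occupies a single bit");
static_assert(is_subbyte_v<int4b_t>, "4-bit integers are sub-byte types");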
} // end namespace cute
| include/cute/numeric/numeric_types.hpp/0 | {
"file_path": "include/cute/numeric/numeric_types.hpp",
"repo_id": "include",
"token_count": 877
} | 12 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse matrix multiply accumulate for SM89
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 4)
# define CUTLASS_ARCH_SPARSE_MMA_SM89_SUPPORTED 1
#endif
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 890)
# define CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe4m3 * fe4m3 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e4m3.e4m3.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
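/// Example: invoking the e4m3 x e4m3 specialization above from a warp (illustrative
/// sketch only; the fragments and the metadata word are assumed to have been populated
/// elsewhere, e.g. by a sparse iterator, which is not shown here).
///
/// \code
/// using SpMma = cutlass::arch::SparseMma<
///     cutlass::gemm::GemmShape<16, 8, 64>, 32,
///     cutlass::float_e4m3_t, cutlass::layout::RowMajor,
///     cutlass::float_e4m3_t, cutlass::layout::ColumnMajor,
///     float, cutlass::layout::RowMajor,
///     cutlass::arch::OpMultiplyAdd, cutlass::arch::SPFormatType::Thread>;
///
/// SpMma::FragmentA frag_A;   // 16 e4m3 values per thread (the stored half of a 2:4-sparse A tile)
/// SpMma::FragmentB frag_B;   // 16 e4m3 values per thread
/// SpMma::FragmentC accum;    // 4 f32 accumulators per thread
/// uint32_t meta = 0;         // packed 2-bit sparsity metadata (FragmentE)
///
/// SpMma mma;
/// mma(accum, frag_A, frag_B, accum, meta, /*id2=*/0);   // id2 must be 0 since kMaxID2 == 1
/// \endcode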
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe4m3 * fe5m2 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e4m3.e5m2.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe5m2 * fe4m3 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e5m2.e4m3.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = fe5m2 * fe5m2 + F32
template <typename Operator_>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_,
SPFormatType::Thread> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 16>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<ElementC, 4>;
using FragmentE = uint32_t;
using Operator = Operator_;
using ArchTag = arch::Sm89;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.f32.e5m2.e5m2.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma_sparse_sm89.h/0 | {
"file_path": "include/cutlass/arch/mma_sparse_sm89.h",
"repo_id": "include",
"token_count": 5036
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Utilities for performing block-striped access (load, store, reduce) of trivially-copyable,
statically-sized array types to global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/wmma_array.h"
#include "cutlass/functional.h"
#include "cutlass/complex.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
// AccessWidth
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes the maximal power-of-two that evenly divides the size of T, capped at Limit
template <
typename T,
int Limit>
struct AccessWidth
{
// Inductive case
template <
int ObjectBytes, /// Size of T in bytes
int AlignBytes, /// Template induction variable
bool IsAligned = /// Whether ObjectBytes is an even multiple of AlignBytes
((AlignBytes <= Limit) && (ObjectBytes % AlignBytes == 0))>
struct Detail
{
static const int value = Detail<ObjectBytes, AlignBytes * 2>::value;
};
// Base case (ObjectBytes is not an even multiple of AlignBytes)
template <
int ObjectBytes, /// Size of T in bytes
int AlignBytes> /// Template induction variable
struct Detail<ObjectBytes, AlignBytes, false>
{
static const int value = AlignBytes / 2;
};
/// The maximal power-of-two that evenly divides the size of T
static const int value = Detail<
(int) sizeof(T),
1>::value;
};
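/// Example: compile-time values produced by AccessWidth (illustrative; each value is the
/// largest power-of-two byte count, capped at 16, that evenly divides sizeof(T)).
///
/// \code
/// static_assert(cutlass::AccessWidth<float, 16>::value == 4, "");   // 4 B object
/// static_assert(cutlass::AccessWidth<double, 16>::value == 8, "");  // 8 B object
/// static_assert(cutlass::AccessWidth<cutlass::Array<cutlass::half_t, 64>, 16>::value == 16, "");  // 128 B, capped at 16 B
/// \endcode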
/////////////////////////////////////////////////////////////////////////////////////////////////
// StripedAccessType
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Default specialization. Striping granularity is type T.)
template <
typename T, /// Data type
int TransferBytes = /// Data access width (16 byte max for global memory access on current architectures)
AccessWidth<T, 16>::value>
struct alignas(TransferBytes) StripedAccessType : public T
{};
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Specialization for cutlass::Array<T>. Striping granularity is a multiple of T.)
template <
typename T, /// Array element type
int N, /// Number of elements in array
bool RegisterSized, /// T is register-sized
int TransferBytes> /// Data access width
struct StripedAccessType<
Array<T, N, RegisterSized>,
TransferBytes>
: public AlignedArray<
T, // Element type of StripedAccessType
__NV_STD_MAX(1, TransferBytes / (int) sizeof(T)), // Number of elements T in StripedAccessType
TransferBytes> // Alignment of StripedAccessType
{};
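/// Example: resulting striped access type for a 64-element half_t array (illustrative).
/// The array occupies 128 B and the maximal access width is 16 B, so each striped access
/// transfers 8 half_t elements with 16 B alignment.
///
/// \code
/// using ArrayT  = cutlass::Array<cutlass::half_t, 64>;
/// using AccessT = cutlass::StripedAccessType<ArrayT>;   // behaves as AlignedArray<half_t, 8, 16>
/// static_assert(sizeof(AccessT) == 16 && alignof(AccessT) == 16, "");
/// \endcode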
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Specialization for cutlass::WmmaFragmentArray<T>. Striping granularity is a multiple of T.)
template<
typename Use,
int m,
int n,
int k,
typename ElementT,
typename Layout,
int kFragments,
int TransferBytes>
struct StripedAccessType<
WmmaFragmentArray<nvcuda::wmma::fragment<Use, m, n, k, ElementT, Layout>, kFragments>,
TransferBytes>
: public AlignedArray<
ElementT,
__NV_STD_MAX(1, TransferBytes / (int) sizeof(ElementT)),
TransferBytes>
{};
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
// BlockStriped
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility for performing block-striped access (load, store) of trivially-copyable,
/// statically-sized array types to global memory
template <
int BlockThreads,
typename ArrayT,
typename AccessT = StripedAccessType<ArrayT> >
struct BlockStriped
{
/// Number of striped accesses
static const int kStripes = int(sizeof(ArrayT) / sizeof(AccessT));
static_assert(kStripes > 0, "AccessT type must be smaller than or equal to ArrayT type");
/// Load
CUTLASS_DEVICE
static void load(ArrayT &data, ArrayT *ptr, int thread_idx)
{
AccessT *access_input = reinterpret_cast<AccessT*>(ptr);
AccessT *access_data = reinterpret_cast<AccessT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i) {
access_data[i] = access_input[(BlockThreads * i) + thread_idx];
}
}
/// Load & Add
CUTLASS_DEVICE
static void load_add(ArrayT &data, ArrayT *ptr, int thread_idx)
{
AccessT *access_input = reinterpret_cast<AccessT*>(ptr);
AccessT *access_data = reinterpret_cast<AccessT*>(&data);
plus<AccessT> add;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i)
{
access_data[i] = add(access_data[i], access_input[(BlockThreads * i) + thread_idx]);
}
}
/// Store
CUTLASS_DEVICE
static void store(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
AccessT *access_output = reinterpret_cast<AccessT*>(ptr);
const AccessT *access_data = reinterpret_cast<const AccessT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i) {
access_output[(BlockThreads * i) + thread_idx] = access_data[i];
}
}
};
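/// Example: staging per-thread fragments through a global-memory workspace with coalesced,
/// block-striped accesses (illustrative sketch; the kernel, fragment type, and workspace
/// layout below are hypothetical).
///
/// \code
/// using ArrayT  = cutlass::Array<float, 64>;                  // 256 B per-thread fragment
/// constexpr int kBlockThreads = 128;
/// using Striped = cutlass::BlockStriped<kBlockThreads, ArrayT>;
///
/// __global__ void stage_fragments(ArrayT *workspace) {
///   ArrayT fragment;
///   // ... produce this thread's fragment ...
///   // hypothetical layout: one ArrayT-sized slot per thread, grouped by threadblock
///   ArrayT *block_ptr = workspace + blockIdx.x * kBlockThreads;
///   Striped::store(block_ptr, fragment, threadIdx.x);         // striped store across the CTA
///   Striped::load(fragment, block_ptr, threadIdx.x);          // matching striped load
/// }
/// \endcode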
/////////////////////////////////////////////////////////////////////////////////////////////////
// BlockStripedReduce
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility for performing block-striped access (load, store, reduce) of trivially-copyable,
/// statically-sized array types to global memory.
/// (Default specialization)
template <
int BlockThreads,
typename ArrayT,
typename ElementT = typename StripedAccessType<ArrayT>::Element>
struct BlockStripedReduce :
BlockStriped<
BlockThreads,
ArrayT,
ElementT>
{
/// Reduce
CUTLASS_DEVICE
static void reduce(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
cutlass::atomic_add<ElementT> reduce;
ElementT *access_output = reinterpret_cast<ElementT*>(ptr);
const ElementT *access_data = reinterpret_cast<const ElementT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < BlockStripedReduce::kStripes; ++i) {
reduce(access_output + (BlockThreads * i) + thread_idx, access_data[i]);
}
}
};
/// Utility for performing block-striped access (load, store, reduce) of trivially-copyable,
/// statically-sized array types to global memory.
/// (Specialization for half_t. Uses half2 vectorized-reduction.)
template <
int BlockThreads,
typename ArrayT>
struct BlockStripedReduce<BlockThreads, ArrayT, half_t> :
BlockStriped<
BlockThreads,
ArrayT,
half2>
{
  static_assert(BlockStripedReduce::kStripes % 2 == 0, "Array of half_t must have an even number of elements");
/// Reduce
CUTLASS_DEVICE
static void reduce(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
cutlass::atomic_add<half2> reduce;
half2 *access_output = reinterpret_cast<half2*>(ptr);
const half2 *access_data = reinterpret_cast<const half2*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < BlockStripedReduce::kStripes; ++i)
{
reduce(access_output + (BlockThreads * i) + thread_idx, access_data[i]);
}
}
};
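/// Example: accumulating partial results from multiple CTAs into a shared workspace tile via
/// atomic, block-striped reduction (illustrative sketch; the workspace layout is hypothetical
/// and the destination must be zero-initialized before the first reduction).
///
/// \code
/// using ArrayT = cutlass::Array<cutlass::half_t, 32>;   // even length, as required above
/// using Reduce = cutlass::BlockStripedReduce<128, ArrayT>;
///
/// __device__ void reduce_partial(ArrayT *workspace_tile, ArrayT const &partial, int thread_idx) {
///   Reduce::reduce(workspace_tile, partial, thread_idx);  // half2 atomic adds, striped across threads
/// }
/// \endcode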
} // namespace cutlass
| include/cutlass/block_striped.h/0 | {
"file_path": "include/cutlass/block_striped.h",
"repo_id": "include",
"token_count": 3040
} | 14 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for device-level Implicit GEMM Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/cuda_host_adapter.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename ImplicitGemmKernel_>
class ImplicitGemmConvolution {
public:
using UnderlyingKernel = ImplicitGemmKernel_;
using ElementA = typename UnderlyingKernel::ElementA;
using LayoutA = typename UnderlyingKernel::LayoutA;
using ElementB = typename UnderlyingKernel::ElementB;
using LayoutB = typename UnderlyingKernel::LayoutB;
using ElementC = typename UnderlyingKernel::ElementC;
using LayoutC = typename UnderlyingKernel::LayoutC;
using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator;
using ElementCompute = typename UnderlyingKernel::ElementCompute;
using OperatorClass = typename UnderlyingKernel::OperatorClass;
using ArchTag = typename UnderlyingKernel::ArchTag;
using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape;
using WarpShape = typename UnderlyingKernel::WarpShape;
using InstructionShape = typename UnderlyingKernel::InstructionShape;
using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp;
static int const kStages = UnderlyingKernel::kStages;
static int const kConvDim = UnderlyingKernel::kConvDim;
using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator;
using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator;
using MathOperator = typename UnderlyingKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm;
static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport;
static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode;
static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
/// Argument structure
using Arguments = typename UnderlyingKernel::Arguments;
private:
/// Kernel parameters object
typename UnderlyingKernel::Params params_;
public:
/// Constructs Implicit GEMM
ImplicitGemmConvolution() { }
/// Determines whether the Implicit GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
// check group conv constraint
if (args.problem_size.groups != 1) {
if (kGroupMode == conv::GroupMode::kNone) {
return Status::kErrorInvalidProblem;
}
      // C and K should be multiples of the number of groups
if (args.problem_size.K % args.problem_size.groups ||
args.problem_size.C % args.problem_size.groups) {
return Status::kErrorInvalidProblem;
}
// split-k is not supported
if (args.problem_size.split_k_slices != 1) {
return Status::kErrorInvalidProblem;
}
int k_per_group = args.problem_size.K / args.problem_size.groups;
      // k_per_group should be a multiple of ThreadblockShape::kN; one CTA computes one group
if (kGroupMode == conv::GroupMode::kSingleGroup && k_per_group % ThreadblockShape::kN) {
return Status::kErrorInvalidProblem;
}
      // ThreadblockShape::kN should be divisible by k_per_group; one CTA computes multiple groups
if (kGroupMode == conv::GroupMode::kMultipleGroup && ThreadblockShape::kN % k_per_group) {
return Status::kErrorInvalidProblem;
}
      // the current optimized iterator algorithm only supports SingleGroup mode
if (kIteratorAlgorithm == IteratorAlgorithm::kOptimized &&
kGroupMode != conv::GroupMode::kSingleGroup) {
return Status::kErrorInvalidProblem;
}
}
static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess;
if (kConvolutionalOperator == conv::Operator::kFprop) {
if (args.problem_size.K % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kDgrad || kConvolutionalOperator == conv::Operator::kDeconv) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kWgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
}
// check for unsupported problem sizes for strided dgrad / deconv implementation
if ((kConvolutionalOperator == conv::Operator::kDgrad || kConvolutionalOperator == conv::Operator::kDeconv) &&
kStrideSupport == conv::StrideSupport::kStrided) {
// split-k (serial or parallel) is not supported for strided dgrad / deconv
if(args.problem_size.split_k_slices > 1) {
return Status::kErrorNotSupported;
}
// dilation > {1x1} is not supported for strided dgrad / deconv
if(args.problem_size.dilation_h > 1 || args.problem_size.dilation_w > 1) {
return Status::kErrorNotSupported;
}
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t workspace_bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
if(args.split_k_mode == SplitKMode::kParallel) {
      // Split-K parallel: CTAs in the k-dimension write partial results to a temporary workspace.
      // The user needs to call a reduction operator to obtain the final output tensor.
workspace_bytes =
sizeof(ElementAccumulator) *
size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) *
size_t(grid_tiled_shape.k());
}
else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) {
      // Split-K serial: The user workspace is used to store a semaphore and to serialize writing of
      // the final reduced output to the user's output tensor.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr,
CudaHostAdapter *cuda_adapter = nullptr) {
if (args.problem_size.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream);
if (status != cudaSuccess) {
return Status::kErrorInternal;
}
}
// initialize the params structure from the arguments
params_ = typename UnderlyingKernel::Params(
args,
static_cast<int *>(workspace)
);
if constexpr (kEnableCudaHostAdapter) {
CUTLASS_ASSERT(cuda_adapter);
return Status::kSuccess;
}
else {
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
return Status::kSuccess;
}
/// Initializes GEMM state from arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
cutlass::Status launch_result = cutlass::Status::kSuccess ;
if constexpr (kEnableCudaHostAdapter) {
//
// Use the cuda host adapter
//
CUTLASS_ASSERT(cuda_adapter);
if (cuda_adapter) {
void* kernel_params[] = {¶ms_};
launch_result = cuda_adapter->launch(
grid, dim3(1,1,1), block, smem_size, stream, kernel_params, 0
);
}
else {
launch_result = Status::kErrorInternal;
}
}
else {
cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_);
}
cudaError_t result = cudaGetLastError();
if (cudaSuccess == result && Status::kSuccess == launch_result) {
return Status::kSuccess;
}
else {
CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result);
return Status::kErrorInternal;
}
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
return run(stream, cuda_adapter);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) {
Status status = initialize(args, workspace, stream, cuda_adapter);
if (status == Status::kSuccess) {
status = run(stream, cuda_adapter);
}
return status;
}
};
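/// Example: typical host-side usage of this device-level operator (illustrative sketch;
/// `Conv2dFpropKernel` stands for an implicit-GEMM kernel type assembled elsewhere, e.g. via
/// cutlass::conv::kernel::DefaultConv2dFprop, and the tensor refs / argument order follow the
/// usual CUTLASS convolution examples).
///
/// \code
/// using Conv2dFprop = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
///
/// Conv2dFprop conv_op;
/// typename Conv2dFprop::Arguments args(
///     problem_size, tensor_a_ref, tensor_b_ref, tensor_c_ref, tensor_d_ref, {alpha, beta});
///
/// cutlass::Status status = Conv2dFprop::can_implement(args);
/// if (status == cutlass::Status::kSuccess) {
///   size_t workspace_bytes = Conv2dFprop::get_workspace_size(args);
///   cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
///   status = conv_op.initialize(args, workspace.get());
///   if (status == cutlass::Status::kSuccess) {
///     status = conv_op();   // equivalent to conv_op.run()
///   }
/// }
/// \endcode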
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/device/implicit_gemm_convolution.h/0 | {
"file_path": "include/cutlass/conv/device/implicit_gemm_convolution.h",
"repo_id": "include",
"token_count": 4688
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradFilterTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorAnalytic strided dgrad needs special handling to skip MMAs
// on non-contributing w positions
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // For a fixed filter position (r, s), find and fill offset_k_ and offset_c_ in the strided and contiguous dimensions
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Moves filter_s
filter_s_ += problem_size_.stride_w;
if (filter_s_ < problem_size_.S) {
return;
}
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r
filter_r_ += problem_size_.stride_h;
if (filter_r_ < problem_size_.R) {
return;
}
// Restore filter_r
filter_r_ = start_r_;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = offset_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
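/// Note: an equivalent host-side enumeration of the filter taps the strided specialization above
/// visits (illustrative; start_r and start_s come from the strided-dgrad decomposition performed
/// by the caller). Each advance() call performs one step of this nested loop, and once both loops
/// are exhausted offset_k_[] moves on by Shape::kRow * split_k_slices to the next GEMM-K tile.
///
/// \code
/// for (int r = start_r; r < problem_size.R; r += problem_size.stride_h) {
///   for (int s = start_s; s < problem_size.S; s += problem_size.stride_w) {
///     // ... one main-loop iteration at filter tap (r, s) ...
///   }
/// }
/// \endcode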
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorAnalytic unity strided dgrad is more performant for dgrad
// on problem sizes with stride = {1x1}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
>{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // For a fixed filter position (r, s), find and fill offset_k_ and offset_c_ in the strided and contiguous dimensions
int filter_r_;
int filter_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = offset_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
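// Note: the unity-stride specialization above is the dense special case of the traversal sketched
// after the strided specialization: advance() visits every filter tap in (r, s) order with s
// varying fastest, and after all R * S taps are consumed it advances offset_k_[] by
// Shape::kRow * split_k_slices to the next GEMM-K tile.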
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 5132
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradOutputGradientTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_k_[ThreadMap::Iterations::kContiguous];
int offset_npq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize filter_k for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] = threadblock_offset.column() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-A by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] += Shape::kColumn * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the output gradient tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int npq = offset_npq_[iteration_strided_];
int n = npq / (problem_size_.P * problem_size_.Q);
int residual = npq % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
int k = filter_k_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(n, p, q, k);
}
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
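    // coord was packed as (n, p, q, k) in at(), so the named NHWC accessors below map
    // h() -> p, w() -> q, and c() -> k of the output gradient tensor Dy.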
return coord.n() < problem_size_.N &&
coord.h() < problem_size_.P &&
coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
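/// Note: the GEMM-K index decomposition used by at() above (illustrative). Each strided offset
/// linearizes (n, p, q) as npq = (n * P + p) * Q + q, so the inverse mapping recovered per access is:
///
/// \code
/// int n        = npq / (P * Q);
/// int residual = npq % (P * Q);
/// int p        = residual / Q;
/// int q        = residual % Q;
/// \endcode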
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 2897
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/depthwise_direct_conv_params.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Shape_,
typename OutputTileShape_,
typename StrideShape_,
typename DilationShape_,
typename ActivationShape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> >
class DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation {
public:
//
// Types
//
using Shape = Shape_;
using OutputTileShape = OutputTileShape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
  // Compile-time values of the stride, dilation, and activation shape
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ActivationShape = ActivationShape_;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
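  // Total number of activation bytes staged per threadblock:
  // (iterations per thread) x (elements per access) x (threads in the block) x sizeof(Element)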
static int const kActivationSize = ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess * ThreadMap::kThreads *
sizeof_bits<Element>::value / 8;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1");
static_assert(OutputTileShape::kN == 1, "Require OutputTileShape::kN == 1");
static_assert(OutputTileShape::kC == Shape::kColumn, "Require OutputTile shape == channels per threadblock");
//
// Parameters structure
//
using Params = Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<Layout>;
private:
Conv2dProblemSize const &problem_size_;
Params const ¶ms_;
char const *pointer_;
// Base channels for current threadblock
int base_c_;
// Base activation index for current threadblock
int offset_intial_npq_;
// Base activation coord for current threadblock
TensorCoord activatioin_base_;
  // Initial thread position
  int offset_initial_hwc_;
  // Index of the current load iteration for this thread
  int iterator_load_;
  // Current thread loading position (linearized within the activation tile)
  int iterator_hwc_;
  // Whether the activation batch index N is inside the tensor
  bool valid_n_;
public:
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset =
MatrixCoord()
)
: params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
offset_intial_npq_(threadblock_offset.row()),
offset_initial_hwc_(thread_idx),
iterator_load_(0) {
base_c_ = threadblock_offset.column();
set_iteration_index(0);
set_activation_coord(offset_intial_npq_);
}
CUTLASS_HOST_DEVICE
void set_activation_coord(int offset_npq) {
    int offset_initial_n, offset_initial_p, offset_initial_q;
    int residual;
    params_.pq_divmod(offset_initial_n, residual, offset_npq);
    params_.q_divmod(offset_initial_p, offset_initial_q, residual);
    int base_n = offset_initial_n;
    int base_h =
        offset_initial_p * OutputTileShape::kH * StrideShape::kRow - problem_size_.pad_h;
    int base_w =
        offset_initial_q * OutputTileShape::kW * StrideShape::kColumn - problem_size_.pad_w;
activatioin_base_ = TensorCoord(base_n, base_h, base_w, base_c_);
valid_n_ = activatioin_base_.n() < problem_size_.N;
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(
problem_size,
layout,
{Shape::kRow, Shape::kColumn},
{OutputTileShape::kN, OutputTileShape::kH, OutputTileShape::kW, OutputTileShape::kC},
kActivationSize);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iterator_hwc_ = offset_initial_hwc_ + index * ThreadMap::kThreads;
iterator_load_ = index;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Go to next threadblock
offset_intial_npq_ += problem_size_.split_k_slices;
set_iteration_index(0);
set_activation_coord(offset_intial_npq_);
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
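  ///
  /// Worked example (illustrative, assuming ThreadMap::Detail::ShapeVec::kContiguous == 8,
  /// ActivationShape::kW == 6, and AccessType::kElements == 4): iterator_hwc_ == 35 decomposes
  /// into c = 35 % 8 = 3 (vector index), next = 35 / 8 = 4, h = 4 / 6 = 0, w = 4 % 6 = 4, and
  /// the channel offset becomes c * AccessType::kElements = 12 relative to the threadblock's
  /// base activation coordinate.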
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int c = iterator_hwc_ % ThreadMap::Detail::ShapeVec::kContiguous ;
int next = iterator_hwc_ / ThreadMap::Detail::ShapeVec::kContiguous ;
int h = next / ActivationShape::kW;
int w = next % ActivationShape::kW;
c = c * AccessType::kElements;
return activatioin_base_ + TensorCoord(0, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
bool valid_c = coord.c() < problem_size_.C;
bool valid_h = coord.h() >= 0 && coord.h() < problem_size_.H;
bool valid_w = coord.w() >= 0 && coord.w() < problem_size_.W;
return valid_n_ ? valid_c & valid_h & valid_w : 0;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr =
reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation &operator++() {
++iterator_load_;
iterator_hwc_ += ThreadMap::kThreads;
if (iterator_load_ < ThreadMap::Iterations::kCount) {
return *this;
}
iterator_load_ = 0;
iterator_hwc_ = offset_initial_hwc_;
return *this;
}
/// Determines the activation size loaded by iterator
CUTLASS_HOST_DEVICE
int get_load_size() {
return kActivationSize;
}
/// Determines the iterations needed
CUTLASS_HOST_DEVICE
int get_iteration_num() {
return ThreadMap::Iterations::kCount;
}
/// Determines whether the Depthwise fprop can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check stride and dilation constraint
if (problem_size.stride_h != StrideShape::kRow || problem_size.stride_w != StrideShape::kColumn) {
return Status::kErrorInvalidProblem;
}
if (problem_size.dilation_h != DilationShape::kRow || problem_size.dilation_w != DilationShape::kColumn) {
return Status::kErrorInvalidProblem;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
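// Usage sketch (illustrative only; the real wiring lives in the depthwise direct-conv threadblock
// MMA, and `Iterator`, `params`, `ptr_activation`, and `tb_offset` below are hypothetical names):
//
//   Iterator iter(params, problem_size, ptr_activation, threadIdx.x, tb_offset);
//   for (int it = 0; it < iter.get_iteration_num(); ++it) {
//     if (iter.valid()) {
//       AccessType const *src = iter.get();
//       // ... copy *src into shared memory or a register fragment here ...
//     }
//     ++iter;
//   }
//   iter.advance();  // move this threadblock to its next output tile along NPQ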
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h/0 | {
"file_path": "include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h",
"repo_id": "include",
"token_count": 3553
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per channel scale+bias+relu before
matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct FpropScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns
static int const MmaCols = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
    // We assume the pair of FP16 values is either both in-bound or both out-of-bound.
    // This requires C to be an even number.
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .b32 t1;\n\t"
" setp.eq.u32 %%p, %2, %4;\n\t"
" fma.rn.f16x2.relu t1, %1, %2, %3;\n"
" selp.u32 %0, 0, t1, %%p;\n\t"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_scale_bias[0]), "r"(ptr_activations[0]),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16x2));
#else
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct WgradScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 rows
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<__half2, MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
__half2 *ptr_activations = reinterpret_cast<__half2 *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
#if 1
// CUDA + PTX version
bool h1_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].x) == cutlass::arch::OOB_NAN_F16);
bool h2_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].y) == cutlass::arch::OOB_NAN_F16);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
    // We cannot guarantee that the pair of FP16 values is both in-bound or both
    // out-of-bound because C x R x S can be an odd number.
asm volatile(
"{\n\t"
" fma.rn.f16x2.relu %0, %1, %2, %3;\n"
"}"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]));
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h1_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff0000) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h2_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
#else
// pure PTX version
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
asm volatile(
"{\n"
" .reg .b16 t1, t2;\n"
" .reg .b32 t3, t4, t5, t6;\n"
" .reg .pred p1, p2;\n"
" mov.b32 {t1, t2}, %2;\n"
" setp.eq.s16 p1, t1, %4;\n"
" setp.eq.s16 p2, t2, %4;\n"
" fma.rn.f16x2.relu t3, %1, %2, %3;\n"
" and.b32 t4, t3, %5;\n"
" selp.b32 t5, t4, t3, p1;\n"
" and.b32 t6, t5, %6;\n"
" selp.b32 %0, t6, t5, p2;\n"
"}\n"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16), "n"(0xffff0000), "n"(0x0000ffff));
#endif
#else
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaRows)]);
}
}
};
} // namespace warp
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/warp/scale_bias_relu_transform.h/0 | {
"file_path": "include/cutlass/conv/warp/scale_bias_relu_transform.h",
"repo_id": "include",
"token_count": 3357
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/epilogue/dispatch_policy.hpp"
#include "cute/tensor.hpp"
#include "cute/numeric/numeric_types.hpp"
#include "cute/util/type_traits.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace collective {
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class Stride>
constexpr bool
is_m_major() {
return cutlass::gemm::detail::is_major<0,Stride>();
}
template <class Stride>
constexpr bool
is_n_major() {
return cutlass::gemm::detail::is_major<1,Stride>();
}
template <class Stride>
constexpr bool
is_im2col() {
return cute::is_same_v<Stride, cutlass::detail::TagToStrideC_t<cutlass::layout::TensorNWC>>
|| cute::is_same_v<Stride, cutlass::detail::TagToStrideC_t<cutlass::layout::TensorNHWC>>
|| cute::is_same_v<Stride, cutlass::detail::TagToStrideC_t<cutlass::layout::TensorNDHWC>>;
}
using cutlass::atomic_maximum;
template <class T>
static constexpr int elements_per_access_v = cutlass::sizeof_bits<uint32_t>::value / cutlass::sizeof_bits<T>::value;
template <class EpilogueSchedule>
static constexpr bool sm90_is_cooperative_v =
cute::is_base_of_v<cutlass::epilogue::TmaWarpSpecializedCooperative, EpilogueSchedule>;
template <class EpilogueSchedule>
static constexpr bool sm90_is_warp_specialized_v =
cute::is_base_of_v<cutlass::epilogue::TmaWarpSpecialized, EpilogueSchedule>;
template <class GmemLayoutTag>
static constexpr bool is_im2col_mode =
cute::is_same_v<GmemLayoutTag, cutlass::layout::TensorNWC> ||
cute::is_same_v<GmemLayoutTag, cutlass::layout::TensorNHWC> ||
cute::is_same_v<GmemLayoutTag, cutlass::layout::TensorNDHWC>;
template <class T>
struct EmptyStorage {
CUTLASS_HOST_DEVICE
T* data() { return nullptr; }
};
template<class EpilogueSchedule, class Stride>
CUTLASS_HOST_DEVICE
auto get_epilogue_stride(Stride stride){
if constexpr (cute::is_base_of_v<cutlass::gemm::EpilogueTransposed, EpilogueSchedule>) {
return cute::make_stride(cute::get<1>(stride), cute::get<0>(stride), cute::get<2>(stride));
}
else {
return stride;
}
}
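// For EpilogueTransposed schedules, get_epilogue_stride swaps the M and N strides so the epilogue
// writes the transposed output; e.g. (illustrative) a stride of (ldc, _1, batch_stride) becomes
// (_1, ldc, batch_stride). All other schedules pass the stride through unchanged.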
template <typename ThreadEpilogueOp, typename = void>
struct IsThreadEpilogueOpWithBias {
static constexpr bool value = false;
using type = typename ThreadEpilogueOp::ElementCompute;
};
template <typename ThreadEpilogueOp>
struct IsThreadEpilogueOpWithBias <ThreadEpilogueOp, cute::void_t<typename ThreadEpilogueOp::ElementBias>> {
static constexpr bool value = true;
using type = typename ThreadEpilogueOp::ElementBias;
};
template <typename ThreadEpilogueOp, typename = void>
struct IsThreadEpilogueOpWithPerChannelScaling {
static constexpr bool value = false;
};
template <typename ThreadEpilogueOp>
struct IsThreadEpilogueOpWithPerChannelScaling <ThreadEpilogueOp, cute::enable_if_t<ThreadEpilogueOp::IsPerChannelScalingSupported>> {
static constexpr bool value = true;
};
template <typename ThreadEpilogueOp, typename = void>
struct IsThreadEpilogueOpWithActivation {
static constexpr bool value = false;
using type = void;
};
template <typename ThreadEpilogueOp>
struct IsThreadEpilogueOpWithActivation <ThreadEpilogueOp, cute::enable_if_t<ThreadEpilogueOp::IsEltActSupported>> {
static constexpr bool value = true;
using type = typename ThreadEpilogueOp::ActivationFn;
};
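// Usage sketch for the detection traits above (illustrative; MyEpilogueOp is a hypothetical
// thread-level epilogue functor):
//
//   if constexpr (IsThreadEpilogueOpWithBias<MyEpilogueOp>::value) {
//     using ElementBias = typename IsThreadEpilogueOpWithBias<MyEpilogueOp>::type;
//     // ... the collective may fuse a bias vector of type ElementBias ...
//   }
//
// The primary templates report false (falling back to ElementCompute / void), while the partial
// specializations are selected via cute::void_t / enable_if_t whenever the corresponding member
// type or flag exists.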
// Wrapper class to use operator-style epilogues in sm90 TMA warp-specialized kernels
template <class EpilogueOp>
class Sm90TmaWarpSpecializedAdapter : public EpilogueOp {
public:
using GmemTiledCopyC = void;
using GmemTiledCopyD = void;
using LoadPipeline = cutlass::PipelineTransactionAsync<0>;
using LoadPipelineState = cutlass::PipelineState<0>;
constexpr static uint32_t TmaTransactionBytes = 0;
using StorePipeline = cutlass::PipelineTmaStore<0>;
using StorePipelineState = cutlass::PipelineState<0>;
using TensorStorage = typename EpilogueOp::SharedStorage;
using PipelineStorage = typename LoadPipeline::SharedStorage;
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_load_pipe_increment([[maybe_unused]] TileShapeMNK) {
return 1;
}
template<class TileShapeMNK>
CUTLASS_HOST_DEVICE
static constexpr int
get_store_pipe_increment([[maybe_unused]] TileShapeMNK) {
return 1;
}
CUTLASS_DEVICE
static void prefetch_tma_descriptors([[maybe_unused]] typename EpilogueOp::Params const&) {
}
// ctor inheritance
using EpilogueOp::EpilogueOp;
CUTLASS_HOST_DEVICE
Sm90TmaWarpSpecializedAdapter(
typename EpilogueOp::Params const& params,
[[maybe_unused]] TensorStorage& shared_tensors)
: EpilogueOp(params) { }
CUTLASS_DEVICE
bool
is_producer_load_needed() const {
return false;
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class TiledMma
>
CUTLASS_DEVICE auto
load(
[[maybe_unused]] LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state,
[[maybe_unused]] ProblemShapeMNKL problem_shape_mnkl,
[[maybe_unused]] TileShapeMNK tile_shape_MNK,
[[maybe_unused]] TileCoordMNKL tile_coord_mnkl,
[[maybe_unused]] TiledMma tiled_mma,
[[maybe_unused]] int thread_idx,
[[maybe_unused]] TensorStorage& shared_tensors,
[[maybe_unused]] int subtile_idx=-1)
{
return load_pipe_producer_state;
}
CUTLASS_DEVICE auto
load_tail(
[[maybe_unused]] LoadPipeline load_pipeline,
LoadPipelineState load_pipe_producer_state)
{
return load_pipe_producer_state;
}
template<
class ProblemShapeMNKL,
class TileShapeMNK,
class TileCoordMNKL,
class AccEngine, class AccLayout,
class TiledMma
>
CUTLASS_DEVICE auto
store(
[[maybe_unused]] LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
[[maybe_unused]] StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state,
ProblemShapeMNKL problem_shape_mnkl,
TileShapeMNK tile_shape_MNK,
TileCoordMNKL tile_coord_mnkl,
cute::Tensor<AccEngine,AccLayout> accumulators,
TiledMma tiled_mma,
int thread_idx,
TensorStorage& shared_tensors,
int subtile_index = -1)
{
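    // Compute the residual extents in M and N for this output tile (how many valid rows/columns
    // remain before the problem boundary) so the wrapped operator-style epilogue can predicate
    // its global-memory accesses.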
constexpr int BLK_M_RANK = cute::rank<0>(tile_shape_MNK);
auto m_max_coord = unwrap(cute::transform(make_seq<BLK_M_RANK>{}, [&](auto i) {
return get<0,i>(problem_shape_mnkl) - get<0,i>(tile_shape_MNK) * get<0,i>(tile_coord_mnkl);
}));
constexpr int BLK_N_RANK = cute::rank<1>(tile_shape_MNK);
auto n_max_coord = unwrap(cute::transform(make_seq<BLK_N_RANK>{}, [&](auto i) {
return get<1,i>(problem_shape_mnkl) - get<1,i>(tile_shape_MNK) * get<1,i>(tile_coord_mnkl);
}));
auto residue_mnk = make_tuple(m_max_coord, n_max_coord, Int<0>{});
(*this)(
problem_shape_mnkl,
tile_shape_MNK,
tile_coord_mnkl,
accumulators,
tiled_mma,
residue_mnk,
thread_idx,
reinterpret_cast<char*>(&shared_tensors));
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
CUTLASS_DEVICE auto
store_tail(
[[maybe_unused]] LoadPipeline load_pipeline,
LoadPipelineState load_pipe_consumer_state,
[[maybe_unused]] StorePipeline store_pipeline,
StorePipelineState store_pipe_producer_state) {
return cute::make_tuple(load_pipe_consumer_state, store_pipe_producer_state);
}
};
} // namespace detail
} // namespace collective
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/collective/detail.hpp/0 | {
"file_path": "include/cutlass/epilogue/collective/detail.hpp",
"repo_id": "include",
"token_count": 3466
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/thread/linear_combination_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source
///
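/// Usage sketch (illustrative; `accum` and `src` are fragments assumed to be produced elsewhere
/// by the epilogue iterators):
///
///   using Combination = LinearCombination<cutlass::half_t, 8, float, float>;
///   Combination::Params params(/*alpha=*/1.5f, /*beta=*/0.5f);
///   Combination op(params);
///   Combination::FragmentOutput d = op(accum, src);  // D = alpha * accum + beta * src
///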
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation.
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename ElementSource_ = ElementOutput_
>
class LinearCombination {
public:
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementScalar = ElementCompute;
using ElementC = ElementSource_;
using ElementD = ElementOutput_;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params
{
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
ElementCompute const* const* alpha_ptr_array; ///< array of pointers to accumulator scalar per group/batch
ElementCompute const* const* beta_ptr_array; ///< array of pointers to source scalar per group/batch
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr),
alpha_ptr_array(nullptr),
beta_ptr_array(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
):
alpha(alpha), beta(beta),
alpha_ptr(nullptr), beta_ptr(nullptr),
alpha_ptr_array(nullptr), beta_ptr_array(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
):
alpha(alpha), beta(0),
alpha_ptr(nullptr), beta_ptr(nullptr),
alpha_ptr_array(nullptr), beta_ptr_array(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
):
alpha(0), beta(0),
alpha_ptr(alpha_ptr), beta_ptr(beta_ptr),
alpha_ptr_array(nullptr), beta_ptr_array(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
):
alpha(0), beta(0),
alpha_ptr(alpha_ptr), beta_ptr(nullptr),
alpha_ptr_array(nullptr), beta_ptr_array(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const* const* alpha_ptr_array,
ElementCompute const* const* beta_ptr_array
):
alpha(0), beta(0),
alpha_ptr(nullptr), beta_ptr(nullptr),
alpha_ptr_array(alpha_ptr_array), beta_ptr_array(beta_ptr_array) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const* const* alpha_ptr_array
):
alpha(0), beta(0),
alpha_ptr(nullptr), beta_ptr(nullptr),
alpha_ptr_array(alpha_ptr_array), beta_ptr_array(nullptr) { }
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombination(Params const ¶ms, int group_idx = 0) {
if (params.alpha_ptr_array != nullptr && params.alpha_ptr_array[group_idx] != nullptr) {
alpha_ = *(params.alpha_ptr_array[group_idx]);
}
else if (params.alpha_ptr != nullptr) {
alpha_ = *params.alpha_ptr;
}
else {
alpha_ = params.alpha;
}
if (params.beta_ptr_array != nullptr && params.beta_ptr_array[group_idx] != nullptr) {
beta_ = *(params.beta_ptr_array[group_idx]);
}
else if (params.beta_ptr != nullptr) {
beta_ = *params.beta_ptr;
}
else {
beta_ = params.beta;
}
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling with source: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentSource const &source) const {
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
if (Scale == ScaleType::Nothing)
return destination_converter(converted_accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
if (Scale == ScaleType::NoBetaScaling)
intermediate = converted_source;
else
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
if (Scale == ScaleType::Nothing)
return destination_converter(converted_accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
return destination_converter(intermediate);
}
//
// Specializations for scalar (for use with cute::collective::DefaultEpilogue)
//
CUTLASS_HOST_DEVICE
ElementD operator()(ElementAccumulator const accumulator, ElementC const source) const {
// Convert everything to Compute type, do compute, and then store to output type
NumericConverter<ElementCompute, ElementAccumulator, Round> accumulator_converter;
[[maybe_unused]] NumericConverter<ElementCompute, ElementC, Round> source_converter;
NumericConverter<ElementD, ElementCompute, Round> destination_converter;
// Convert to destination numeric type
ElementCompute converted_accumulator = accumulator_converter(accumulator);
if constexpr (Scale == ScaleType::Nothing) {
return destination_converter(converted_accumulator);
}
// Perform binary operations
ElementCompute intermediate;
multiplies<ElementCompute> multiply;
multiply_add<ElementCompute> madd;
if constexpr (Scale == ScaleType::NoBetaScaling) {
intermediate = source_converter(source);
}
else {
intermediate = multiply(beta_, source); // X = beta * C + uniform
}
intermediate = madd(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
return destination_converter(intermediate);
}
CUTLASS_HOST_DEVICE
ElementD operator()(ElementAccumulator const accumulator) const {
// Convert everything to Compute type, do compute, and then store to output type
NumericConverter<ElementCompute, ElementAccumulator, Round> accumulator_converter;
NumericConverter<ElementD, ElementCompute, Round> destination_converter;
ElementCompute converted_accumulator = accumulator_converter(accumulator);
// Convert to destination numeric type
if constexpr (Scale == ScaleType::Nothing) {
return destination_converter(converted_accumulator);
}
// Perform binary operations
ElementCompute intermediate;
multiplies<ElementCompute> multiply;
intermediate = multiply(alpha_, accumulator); // D = alpha * Accum
return destination_converter(intermediate);
}
};
/// Applies a linear combination operator to an array of elements.
///
/// D = vector_alpha * accumulator + (optional) vector_beta/scalar_beta * source
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation.
typename ElementAccumulator_, ///< Accumulator data type
typename ElementCompute_, ///< Data type used to compute linear combination
FloatRoundStyle Round,
typename ElementSource_
>
class LinearCombination<ElementOutput_,
Count,
ElementAccumulator_,
ElementCompute_,
ScaleType::PerChannelScaling,
Round,
ElementSource_> {
public:
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementC = ElementSource_;
using ElementD = ElementOutput_;
static int const kCount = Count;
static const ScaleType::Kind kScale = ScaleType::PerChannelScaling;
static constexpr bool IsPerChannelScalingSupported = true;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params
{
ElementCompute const *alpha_ptr; ///< pointer to accumulator vector
ElementCompute const *beta_ptr; ///< pointer to source vector
ElementCompute beta; ///< scales source tensor
CUTLASS_HOST_DEVICE
Params():
alpha_ptr(nullptr),
beta_ptr(nullptr),
beta(ElementCompute(0)) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
):
alpha_ptr(alpha_ptr), beta_ptr(beta_ptr), beta(ElementCompute(0)) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
):
alpha_ptr(alpha_ptr), beta_ptr(nullptr), beta(ElementCompute(0)) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute beta
):
alpha_ptr(alpha_ptr), beta_ptr(nullptr), beta(beta) { }
};
private:
//
// Data members
//
ElementCompute const* beta_ptr_ = nullptr;
ElementCompute beta_ = 0;
public:
/// Constructs the function object
CUTLASS_HOST_DEVICE
LinearCombination(Params const& params) {
if (params.beta_ptr) {
beta_ptr_ = params.beta_ptr;
}
else {
beta_ = params.beta;
}
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ptr_ != nullptr || beta_ != ElementCompute(0);
}
CUTLASS_HOST_DEVICE
bool is_beta_vector() const {
return beta_ptr_ != nullptr;
}
/// Computes linear scaling with source: D = vector_alpha * accumulator + vector_beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const& accumulator,
FragmentSource const& source,
FragmentCompute const& valpha,
FragmentCompute const& vbeta) const {
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
intermediate = mul_add_source(vbeta, converted_source); // X = vector_beta * C + uniform
intermediate = mul_add_accumulator(valpha, converted_accumulator, intermediate); // D = vector_alpha * Accum + X
return destination_converter(intermediate);
}
/// Computes linear scaling with source: D = vector_alpha * accumulator + scalar_beta(from host) * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const& accumulator,
FragmentSource const& source,
FragmentCompute const& valpha) const {
// Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
intermediate = mul_add_source(beta_, converted_source); // X = scalar_beta * C + uniform
intermediate = mul_add_accumulator(valpha, converted_accumulator, intermediate); // D = vector_alpha * Accum + X
return destination_converter(intermediate);
}
/// Computes linear scaling: D = vector_alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const& accumulator,
FragmentCompute const& valpha) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
intermediate = mul_accumulator(valpha, converted_accumulator); // D = vector_alpha * Accum
return destination_converter(intermediate);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination.h",
"repo_id": "include",
"token_count": 6545
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/epilogue_with_reduction.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueWithReductionTensorOp {
/// Use defaults related to the existing epilogue
using Base = DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
ElementsPerAccess
>;
/// Additional tensor tile iterator
using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
typename OutputOp::ElementTensor
>;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
/// Define the epilogue
using Epilogue = EpilogueWithReduction<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputTileIterator,
TensorTileIterator,
typename WarpMmaTensorOp::ElementC,
typename Base::AccumulatorFragmentIterator,
typename Base::WarpTileIterator,
typename Base::SharedLoadIterator,
typename Base::OutputOp,
ReductionOp,
typename Base::Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueWithReductionVoltaTensorOp {
/// Use defaults related to the existing epilogue
using Base = DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
ElementsPerAccess
>;
/// Additional tensor tile iterator
using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
typename OutputOp::ElementTensor
>;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
/// Define the epilogue
using Epilogue = EpilogueWithReduction<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputTileIterator,
TensorTileIterator,
typename WarpMmaTensorOp::ElementC,
typename Base::AccumulatorFragmentIterator,
typename Base::WarpTileIterator,
typename Base::SharedLoadIterator,
typename Base::OutputOp,
ReductionOp,
typename Base::Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_with_reduction.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_with_reduction.h",
"repo_id": "include",
"token_count": 1695
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Epilogue visitor for threadblock-scoped GEMMs that performs softmax computations in the epilogue.
    The epilogue finds the maximum value in each row of the row-major output matrix and stores it.
    The max values are also used for a further round of threadblock-scoped reduction, where the
    partial reduction results are stored in a pre-allocated array and used for a final full reduction.
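    The visitor uses the standard "online softmax" recurrence while sweeping a row in column
    chunks (a sketch of the math realized by visit() and end_row() below):
        M_new = max(M_old, max(chunk))                                  // running row maximum
        S_new = S_old * exp(M_old - M_new) + sum(exp(chunk - M_new))    // rescaled running sum
    end_row() stores M (as ElementNorm) and S (as ElementSum) so that a subsequent kernel can
    complete the full reduction and normalization.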
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/fast_math.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
template <
typename ThreadblockShape_,
int ThreadCount,
typename OutputTileIterator_,
typename ElementAccumulator_,
typename ElementNorm_,
typename ElementSum_,
typename ElementSoftmaxCompute_,
typename ElementwiseFunctor_,
bool UseMasking_ = false
>
class EpilogueVisitorSoftmax {
public:
using ThreadblockShape = ThreadblockShape_;
static int const kThreadCount = ThreadCount;
using OutputTileIterator = OutputTileIterator_;
using ElementwiseFunctor = ElementwiseFunctor_;
static int const kIterations = OutputTileIterator::kIterations;
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
using ElementOutput = typename OutputTileIterator::Element;
using LayoutOutput = cutlass::layout::RowMajor;
using ElementAccumulator = ElementAccumulator_;
using ElementNorm = ElementNorm_;
using ElementSum = ElementSum_;
using ElementSoftmaxCompute = ElementSoftmaxCompute_;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
using SoftmaxFragment = Array<ElementSoftmaxCompute, kElementsPerAccess>;
using OutputVector = Array<ElementOutput, kElementsPerAccess>;
using TensorRefD = TensorRef<ElementOutput, LayoutOutput>;
static int const kThreadsPerRow = OutputTileIterator::ThreadMap::Detail::kAccessWidth;
static bool const kHasMultiStepsInRow = (OutputTileIterator::ThreadMap::Iterations::kColumn > 1);
static bool const kUseMasking = UseMasking_;
/// Argument structure
struct Arguments {
typename ElementwiseFunctor::Params elementwise;
int64_t batch_stride_C;
int64_t batch_stride_D;
int64_t batch_stride_Max;
int64_t batch_stride_Sum;
//
// Methods
//
Arguments():
batch_stride_C(0),
batch_stride_D(0),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_
):
elementwise(elementwise_),
batch_stride_C(0),
batch_stride_D(0),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_,
int64_t batch_stride_C_,
int64_t batch_stride_D_,
int64_t batch_stride_Max_,
int64_t batch_stride_Sum_
):
elementwise(elementwise_),
batch_stride_C(batch_stride_C_),
batch_stride_D(batch_stride_D_),
batch_stride_Max(batch_stride_Max_),
batch_stride_Sum(batch_stride_Sum_)
{
}
};
struct Params {
typename ElementwiseFunctor::Params elementwise;
int64_t batch_stride_C;
int64_t batch_stride_D;
int64_t batch_stride_Max;
int64_t batch_stride_Sum;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params()
{
}
CUTLASS_HOST_DEVICE
Params(Arguments const &args):
elementwise(args.elementwise),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
batch_stride_Max(args.batch_stride_Max),
batch_stride_Sum(args.batch_stride_Sum)
{
}
};
/// Shared storage
struct SharedStorage {
};
private:
Params const & params_;
SharedStorage & shared_storage_;
MatrixCoord extent_;
MatrixCoord extent_real_;
ElementwiseFunctor elementwise_;
OutputTileIterator iterator_C_;
OutputTileIterator iterator_D_;
typename OutputTileIterator::Fragment fragment_C_;
typename OutputTileIterator::Fragment fragment_D_;
ElementAccumulator alpha_;
ElementAccumulator beta_;
ElementNorm *ptr_Max_;
ElementSum *ptr_Sum_;
int column_offset_;
ElementSoftmaxCompute accum_max_;
ElementSoftmaxCompute accum_sum_;
MatrixCoord thread_offset_;
float infinity_;
public:
CUTLASS_DEVICE
EpilogueVisitorSoftmax(
Params const ¶ms,
SharedStorage &shared_storage,
cutlass::MatrixCoord const &problem_size,
int thread_idx,
int warp_idx,
int lane_idx,
typename OutputTileIterator::Params params_C,
typename OutputTileIterator::Params params_D,
typename OutputTileIterator::Element *ptr_C,
typename OutputTileIterator::Element *ptr_D,
ElementNorm *ptr_Max = nullptr,
ElementSum *ptr_Sum = nullptr,
cutlass::MatrixCoord const &threadblock_offset = cutlass::MatrixCoord(0, 0),
int column_offset = 0,
cutlass::MatrixCoord const &problem_size_real = cutlass::MatrixCoord(0, 0),
float infinity = 10000.0f
):
params_(params),
shared_storage_(shared_storage),
extent_(problem_size),
elementwise_(params.elementwise),
iterator_C_(params_C, ptr_C, problem_size, thread_idx, threadblock_offset),
iterator_D_(params_D, ptr_D, problem_size, thread_idx, threadblock_offset),
ptr_Max_(ptr_Max),
ptr_Sum_(ptr_Sum),
column_offset_(column_offset),
extent_real_(problem_size_real),
infinity_(infinity)
{
alpha_ = (params.elementwise.alpha_ptr ? *params.elementwise.alpha_ptr : params.elementwise.alpha);
beta_ = (params.elementwise.beta_ptr ? *params.elementwise.beta_ptr : params.elementwise.beta);
if (beta_ == ElementAccumulator()) {
iterator_C_.clear_mask();
}
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
iterator_C_.add_pointer_offset(batch_idx * params_.batch_stride_C);
iterator_D_.add_pointer_offset(batch_idx * params_.batch_stride_D);
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
fragment_D_.clear();
fragment_C_.clear();
if (elementwise_.kScale != cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
iterator_C_.load(fragment_C_);
++iterator_C_;
}
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
// Clear accumulators for max and sum when starting a whole row
clear_accum_();
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
using Mul = cutlass::multiplies<SoftmaxFragment>;
using Minus = cutlass::minus<SoftmaxFragment>;
using Exp = cutlass::fast_exp_op<SoftmaxFragment>;
Minus minus;
Exp exponential;
SoftmaxFragment result;
NumericArrayConverter<ElementSoftmaxCompute, ElementOutput, kElementsPerAccess> source_converter;
OutputVector &source_vector = reinterpret_cast<OutputVector *>(&fragment_C_)[frag_idx];
if (elementwise_.kScale == cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
result = source_converter(elementwise_(accum));
    } else {
result = source_converter(elementwise_(accum, source_vector));
}
thread_offset_ =
iterator_D_.thread_start() +
OutputTileIterator::ThreadMap::iteration_offset(frag_idx);
bool column_guard = (thread_offset_.column() < extent_.column());
if (kUseMasking) {
int elements_in_boundary = extent_real_.column() - thread_offset_.column();
elements_in_boundary = (elements_in_boundary > kElementsPerAccess) ? kElementsPerAccess : elements_in_boundary;
elementwise_padding_(result, elements_in_boundary);
}
ElementSoftmaxCompute accum_max_prev = accum_max_;
// Compute the maximum within one row
if (!column_idx) {
// This is the first fragment in a new row
if (column_guard) {
accum_max_ = maximum_accumulator_(result);
}
}
else {
// This is an additional fragment in the same row
if (column_guard) {
accum_max_ = maximum_accumulator_(result, accum_max_);
}
}
// proactively compute max in warps
accum_max_ = warp_reduce_max_(accum_max_);
ElementSoftmaxCompute updater = fast_exp(accum_max_prev - accum_max_);
SoftmaxFragment intermediate = exponential(minus(result, accum_max_));
if (kHasMultiStepsInRow) {
if (!column_idx) {
accum_sum_ = (column_guard) ? \
sum_accumulator_(intermediate) : ElementSoftmaxCompute(0);
} else {
// Algorithm in $3.1, https://arxiv.org/pdf/2205.14135v1.pdf
// S* = S* x updater + sum_row(P'), where updater = exp(M* - M_row)
accum_sum_ = (column_guard) ? \
sum_accumulator_(intermediate, accum_sum_ * updater) : accum_sum_ * updater;
}
} else {
accum_sum_ = (column_guard) ? sum_accumulator_(intermediate, accum_sum_) : ElementSoftmaxCompute(0);
}
// Convert to the output
NumericArrayConverter<ElementOutput, ElementSoftmaxCompute, kElementsPerAccess> output_converter;
OutputVector &output = reinterpret_cast<OutputVector *>(&fragment_D_)[frag_idx];
output = output_converter(result);
}
/// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
using ConvertSumOutput = cutlass::NumericConverter<ElementSum, ElementSoftmaxCompute>;
using ConvertNormOutput = cutlass::NumericConverter<ElementNorm, ElementSoftmaxCompute>;
ConvertSumOutput convert_sum_output;
ConvertNormOutput convert_norm_output;
// Compute accumulate sum only in the last step
accum_sum_ = warp_reduce_sum_(accum_sum_);
bool is_first_thread_in_tile = ((threadIdx.x % kThreadsPerRow) == 0);
bool row_guard = thread_offset_.row() < extent_.row();
bool is_write_thread = row_guard && is_first_thread_in_tile;
int block_batch = blockIdx.z;
ElementNorm *curr_ptr_max = ptr_Max_ + thread_offset_.row() + column_offset_ + block_batch * params_.batch_stride_Max;
ElementSum *curr_ptr_sum = ptr_Sum_ + thread_offset_.row() + column_offset_ + block_batch * params_.batch_stride_Sum;
arch::global_store<ElementNorm, sizeof(ElementNorm)>(
convert_norm_output(accum_max_),
(void *)curr_ptr_max,
is_write_thread);
arch::global_store<ElementSum, sizeof(ElementSum)>(
convert_sum_output(accum_sum_),
(void *)curr_ptr_sum,
is_write_thread);
// Clear accumulators for max and sum when finishing a whole row
clear_accum_();
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
iterator_D_.store(fragment_D_);
++iterator_D_;
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
private:
CUTLASS_DEVICE
void elementwise_padding_(SoftmaxFragment &result, int elements_in_boundary) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
result[i] = (i < elements_in_boundary) ? result[i] : ElementSoftmaxCompute(-infinity_);
}
}
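  // The two warp reductions below use an XOR butterfly across the kThreadsPerRow lanes that share
  // a row: for example (illustrative), with kThreadsPerRow == 4 each lane exchanges with lane ^ 2
  // and then lane ^ 1, so after log2(4) = 2 steps every lane holds the row-wise sum / max.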
CUTLASS_DEVICE
ElementSoftmaxCompute warp_reduce_sum_(ElementSoftmaxCompute sum_) {
int half_thread_in_row = (kThreadsPerRow >> 1);
CUTLASS_PRAGMA_UNROLL
for (int i = half_thread_in_row; i > 0; i >>= 1) {
ElementSoftmaxCompute tmp = __shfl_xor_sync(0xFFFFFFFF, sum_, i);
sum_ += tmp;
}
return sum_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute warp_reduce_max_(ElementSoftmaxCompute max_) {
int half_thread_in_row = (kThreadsPerRow >> 1);
CUTLASS_PRAGMA_UNROLL
for (int i = half_thread_in_row; i > 0; i >>= 1) {
ElementSoftmaxCompute tmp = __shfl_xor_sync(0xFFFFFFFF, max_, i);
max_ = fast_max(max_, tmp);
}
return max_;
}
CUTLASS_DEVICE
void clear_accum_() {
uint32_t float_max_bits = 0xff7fffff; // -FLT_MAX
float min_float = reinterpret_cast<float const &>(float_max_bits);
accum_max_ = ElementSoftmaxCompute(min_float);
accum_sum_ = ElementSoftmaxCompute(0);
}
CUTLASS_DEVICE
ElementSoftmaxCompute sum_accumulator_(SoftmaxFragment const &accum) {
ElementSoftmaxCompute sum_ = ElementSoftmaxCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
sum_ += ElementSoftmaxCompute(accum[i]);
}
return sum_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute sum_accumulator_(SoftmaxFragment const &accum, ElementSoftmaxCompute sum_) {
// ElementSoftmaxCompute sum_ = ElementSoftmaxCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
sum_ += ElementSoftmaxCompute(accum[i]);
}
return sum_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute maximum_accumulator_(SoftmaxFragment const &accum) {
ElementSoftmaxCompute max_ = accum[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < SoftmaxFragment::kElements; ++i) {
max_ = fast_max(max_, ElementSoftmaxCompute(accum[i]));
}
return max_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute maximum_accumulator_(SoftmaxFragment const &accum, ElementSoftmaxCompute max_) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
max_ = fast_max(max_, ElementSoftmaxCompute(accum[i]));
}
return max_;
}
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
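/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reference sketch (illustrative only, not part of the CUTLASS API): the running-max / running-sum
// recurrence maintained by the softmax epilogue visitor above, written for a plain host-side row
// of floats. The function name and signature are hypothetical.
//
#include <cmath>  // std::exp, used only by the illustration below
inline void online_softmax_stats_reference(float const *x, int n, float &row_max, float &row_sum) {
  row_max = x[0];   // assumes n >= 1
  row_sum = 1.0f;   // exp(x[0] - x[0])
  for (int i = 1; i < n; ++i) {
    float m_new = (x[i] > row_max) ? x[i] : row_max;   // updated running maximum
    row_sum = row_sum * std::exp(row_max - m_new)      // rescale the previous partial sum
            + std::exp(x[i] - m_new);                  // add the new term
    row_max = m_new;
  }
}
/////////////////////////////////////////////////////////////////////////////////////////////////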
| include/cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h",
"repo_id": "include",
"token_count": 6697
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
/// It provides a fast path for the case Rank = 2 which does not need div/rem to
/// calculate modes.
template <
  typename ThreadMap_,       ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
int Rank
>
class PredicatedTileIteratorAffineRankN {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::AffineRankN<Rank>;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = typename Layout::TensorCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
static_assert( !(Layout::kRank % 2),
"Layout rank must be even. This assumes the first half of the modes correspond to the 'row' "
"and the second half of the modes correspond to the 'column'");
static bool const kBigEndian = false;
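  // Example (little-endian scheme below): with Rank = 4 and row-mode extents {E0, E1}, a logical
  // row coordinate m decomposes into modes m0 = m % E0 and m1 = m / E0 and contributes the byte
  // offset m0 * stride_m[0] + m1 * stride_m[1]; column coordinates decompose the same way against
  // stride_n.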
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Parameters structure
struct Params {
//
// Data members
//
Layout layout;
/// Stride in units of bytes along M modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m;
/// Stride in units of bytes along N modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n;
    /// Fast div-/mod-by-extent objects used to decompose row coordinates into M modes
    FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
    /// Fast div-/mod-by-extent objects used to decompose column coordinates into N modes
    FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
    /// Byte offset increment between successive column accesses (rank-2 fast path)
    int64_t rank2_inc_col;
    /// Byte offset increment between successive row accesses (rank-2 fast path)
    int64_t rank2_inc_row;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(TensorCoord const &extent, Layout const &layout_): layout(layout_) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
}
if (kBigEndian) {
// "Big Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i + 1]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
}
}
else {
// "Little Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
}
}
#if 0
//
// Debug print statements to verify extents and strides are passed correctly.
//
printf("PredicatedTileIteratorAffine::Params() entered\n");
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
printf(" extent[%d]: %d\n", i, extent[i]);
}
for (int i = 0; i < Layout::kRank; ++i) {
printf(" stride[%d]: %ld\n", i, layout_.stride()[i]);
}
printf("PredicatedTileIteratorAffine::Params() returning\n");
#endif
}
CUTLASS_HOST_DEVICE
Params(Layout const &layout_): layout(layout_) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
}
rank2_inc_col = ThreadMap::Delta::kColumn * stride_n[0];
rank2_inc_row = ThreadMap::Delta::kRow * stride_m[0];
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in columns
Index extent_col_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column position (assuming steady-state predicates have been computed)
Index thread_start_column_;
/// Internal state counter
int state_[3];
/// Offsets in columns, cached for performance
int64_t offset_modes_n_[ThreadMap::Iterations::kColumn];
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorAffineRankN(
Params const & params,
Element *pointer,
MatrixCoord extent,
int thread_idx,
MatrixCoord threadblock_offset = MatrixCoord(),
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
params_(params)
{
MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_col_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
if (Layout::kRank > 2) {
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
//
// Compute coordinate and decompose into N modes
//
int coord_n = thread_start_column_ + c * ThreadMap::Delta::kColumn;
mask_.predicates[c] = coord_n < extent.column();
Coord<Layout::kRank / 2, Index> modes_n;
int64_t offset_modes_n = 0;
if (kBigEndian) {
modes_n = CoordinateDecomposition<Layout::kRank / 2>(coord_n, params_.divmod_n);
offset_modes_n = dot(modes_n, params_.stride_n);
}
else {
modes_n = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_n, params_.divmod_n);
offset_modes_n = dot(modes_n, params_.stride_n);
}
offset_modes_n_[c] = offset_modes_n;
}
if (!pointer) {
mask_.clear();
}
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer);
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t const *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster;
int64_t offset_modes_m = row_begin * params_.stride_m[0];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
//
// Compute coordinate and decompose into M modes
//
int coord_m = row * ThreadMap::Delta::kRow + row_begin;
Coord<Layout::kRank / 2, Index> modes_m;
if (Layout::kRank > 2) {
if (kBigEndian) {
modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m, params_.divmod_m);
} else {
modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_m, params_.divmod_m);
}
offset_modes_m = dot(modes_m, params_.stride_m);
}
//
// Compute the offset due to modes M
//
bool row_guard = (coord_m < extent_row_);
int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0];
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
//
// Compute coordinate and decompose into N modes
//
if (Layout::kRank > 2) {
offset_modes_n = offset_modes_n_[column];
}
//
// Compute the pointer and access
//
bool guard;
if (Layout::kRank > 2) {
guard = row_guard && mask_.predicates[column];
} else {
guard = (coord_m < extent_row_) &&
((thread_start_column_ + ThreadMap::Delta::kColumn * column) < extent_col_);
}
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset),
guard
);
if (Layout::kRank == 2) {
offset_modes_n += params_.rank2_inc_col;
}
}
if (Layout::kRank == 2) {
offset_modes_m += params_.rank2_inc_row;
}
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster;
int64_t offset_modes_m = row_begin * params_.stride_m[0];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
//
// Compute coordinate and decompose into M modes
//
int coord_m = row * ThreadMap::Delta::kRow + row_begin;
Coord<Layout::kRank / 2, Index> modes_m;
if (Layout::kRank > 2) {
if (kBigEndian) {
modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m, params_.divmod_m);
} else {
modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_m, params_.divmod_m);
}
offset_modes_m = dot(modes_m, params_.stride_m);
}
//
// Compute the offset due to modes M
//
bool row_guard = (coord_m < extent_row_);
int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0];
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
//
// Compute coordinate and decompose into N modes
//
if (Layout::kRank > 2) {
offset_modes_n = offset_modes_n_[column];
}
//
// Compute the pointer and access
//
bool guard;
if (Layout::kRank > 2) {
guard = row_guard && mask_.predicates[column];
} else {
guard = (coord_m < extent_row_) && ((thread_start_column_ + ThreadMap::Delta::kColumn * column) < extent_col_);
}
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset),
guard);
if (Layout::kRank == 2) {
offset_modes_n += params_.rank2_inc_col;
}
}
if (Layout::kRank == 2) {
offset_modes_m += params_.rank2_inc_row;
}
}
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_byte_offset(frag, 0);
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineRankN &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
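//
// Illustrative sketch (not part of the CUTLASS API): the row-offset arithmetic the iterator above
// performs for a rank-4 affine layout under the little-endian scheme (kBigEndian == false).
// Strides are byte-valued, as precomputed in Params::stride_m; the function name is hypothetical.
//
inline int64_t example_affine_rank4_row_offset_bytes(int coord_m, int extent_m0, int64_t const stride_m[2]) {
  int m0 = coord_m % extent_m0;                                   // fastest-varying M mode
  int m1 = coord_m / extent_m0;                                   // remaining M mode
  return int64_t(m0) * stride_m[0] + int64_t(m1) * stride_m[1];   // dot(modes_m, stride_m)
}
////////////////////////////////////////////////////////////////////////////////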
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h",
"repo_id": "include",
"token_count": 7606
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#if !(defined(__clang__) && defined(__CUDA__))
#include "cutlass/wmma_array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/wmma_tensor_op_policy.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
///
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC, ///< matrix multiply operation data type (concept: data type)
  typename OperatorFragmentC,   ///< matrix multiply operation fragment (concept: nvcuda::wmma::fragment)
typename Layout ///< target shared memory layout
>
class FragmentIteratorWmmaTensorOp;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type)
  typename OperatorFragmentC_   ///< matrix multiply operation fragment (concept: nvcuda::wmma::fragment)
>
class FragmentIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::RowMajor;
using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = WmmaFragmentArray<OperatorFragmentC, Policy::OperatorCount::kColumn>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = WmmaFragmentArray<OperatorFragmentC, Policy::OperatorCount::kCount>;
using OutputAccumulatorTile = AccumulatorTile;
private:
/// Internal access type
using AccessType = WmmaFragmentArray<OperatorFragmentC, Policy::kWmmaFragmentsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorWmmaTensorOp(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorWmmaTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorWmmaTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for(int n=0; n < Policy::OperatorCount::kColumn; n++) {
int accumulator_access_offset = index_ * Policy::OperatorCount::kColumn + n;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
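////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch only (not part of the CUTLASS API): how an epilogue might walk a warp's
// accumulator tile one fragment row at a time with the iterator above. The loop bound assumes
// Policy::OperatorCount::kRow (a MatrixShape member) gives the number of fragment rows.
//
template <typename FragmentIterator, typename Visitor>
CUTLASS_DEVICE
void example_visit_wmma_accumulators(
  typename FragmentIterator::AccumulatorTile const &accum,   ///< warp-level accumulator tile
  Visitor &&visit_fragment) {                                ///< callable applied to each fragment
  FragmentIterator frag_iterator(accum);
  CUTLASS_PRAGMA_UNROLL
  for (int row = 0; row < FragmentIterator::Policy::OperatorCount::kRow; ++row, ++frag_iterator) {
    typename FragmentIterator::Fragment frag;
    frag_iterator.load(frag);     // one row of WMMA fragments (OperatorCount::kColumn of them)
    visit_fragment(frag);         // e.g., convert and store to shared memory
  }
}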
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
#else
#error fragment_iterator_wmma_tensor_op.h is not supported when compiling CUDA with Clang
#endif // !(defined(__clang__) && defined(__CUDA__))
| include/cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h",
"repo_id": "include",
"token_count": 1737
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level grouped Rank2K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/default_rank_2k.h"
#include "cutlass/gemm/kernel/default_rank_2k_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
///
typename Enable = void
>
struct DefaultRank2KGrouped;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
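  // The underlying Rank2K kernels operate on a row-major C. A column-major C is handled by solving
  // the equivalent transposed problem: the same memory viewed as row-major holds C^T, which (in
  // effect) flips the fill mode and exchanges the roles of the A and B operands. Rank2KMapArguments
  // below encodes that remapping of element types, layouts, transforms, and fill mode.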
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
false, // SplitKSerial
Operator,
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MapArguments::kTransformA,
MapArguments::kTransformB,
Operator,
false, // SplitKSerial
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
/// Pass through the user-provided TransformA and TransformB so as to
/// correctly set public-facing TransformA and TransformB in kernel::Rank2KGrouped.
/// This is needed because kernel::DefaultRank2KComplex may change TransformA and
/// TransformB that become template arguments to Mma1 and Mma2.
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_rank_2k_grouped.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_rank_2k_grouped.h",
"repo_id": "include",
"token_count": 3963
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Scheduler for grouped GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Helper for correctly representing problem sizes in grouped kernels
template <
typename ThreadblockShape,
bool Transposed
>
struct GemmGroupedProblemSizeHelper {
static bool const kTransposed = Transposed;
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
if (kTransposed) {
swap(problem.m(), problem.n());
}
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return grid.m() * grid.n();
}
};
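// Example: a problem of size (M, N, K) = (1024, 512, 4096) with a 128x128 ThreadblockShape yields
// grid_shape = (8, 4, 1), i.e. tile_count = 32 threadblock tiles for that group.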
} // namespace detail
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
bool Transposed = false>
struct GemmGroupedProblemVisitor : public GroupedProblemVisitor<
detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static bool const kTransposed = Transposed;
using ProblemSizeHelper = detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>;
using Base = GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount>;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
GemmGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base (params_, shared_storage_, block_idx)
{}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h",
"repo_id": "include",
"token_count": 1488
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Gemm kernel with fused reduction operation.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/layout.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool IsSingleSource = Epilogue_::kIsSingleSource
>
struct GemmWithFusedEpilogue;
// GemmWithFusedEpilogue with two sources
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, false> {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value
);
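  // For example, with half_t operands (16 bits each), kSplitKAlignment = 128 / 16 = 8 elements, so
  // every split-K slice of the K dimension begins on a 128b-aligned boundary.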
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C1;
void const * ptr_C2;
void * ptr_D;
void * ptr_Vector;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C1;
int64_t batch_stride_C2;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc1;
typename LayoutC::Stride::Index ldc2;
typename LayoutC::Stride::Index ldd;
typename LayoutC::Stride::Index ldr;
typename LayoutC::Stride::Index ldt;
//
// Methods
//
Arguments():
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C1(nullptr),
ptr_C2(nullptr),
ptr_D(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C1,
void const * ptr_C2,
void * ptr_D,
void * ptr_Vector,
void * ptr_Tensor,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C1,
int64_t batch_stride_C2,
int64_t batch_stride_D,
int64_t batch_stride_Vector,
int64_t batch_stride_Tensor,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc1,
typename LayoutC::Stride::Index ldc2,
typename LayoutC::Stride::Index ldd,
typename LayoutC::Stride::Index ldr,
typename LayoutC::Stride::Index ldt)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C1(batch_stride_C1),
batch_stride_C2(batch_stride_C2),
batch_stride_Vector(batch_stride_Vector),
batch_stride_Tensor(batch_stride_Tensor),
lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C1;
typename Epilogue::OutputTileIterator::Params params_C2;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::TensorTileIterator::Params params_Tensor;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C1;
void * ptr_C2;
void * ptr_D;
void * ptr_Vector;
typename LayoutC::Stride::Index ldr;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C1;
int64_t batch_stride_C2;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C1(args.ldc1),
params_C2(args.ldc2),
params_D(args.ldd),
params_Tensor(args.ldt),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C1(const_cast<void *>(args.ptr_C1)),
ptr_C2(const_cast<void *>(args.ptr_C2)),
ptr_D(args.ptr_D),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C1(args.batch_stride_C1),
batch_stride_C2(args.batch_stride_C2),
batch_stride_Vector(args.batch_stride_Vector),
batch_stride_Tensor(args.batch_stride_Tensor)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::Params()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
}
/// Lightweight update given a subset of arguments.
CUTLASS_HOST_DEVICE
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C1 = const_cast<void *>(args.ptr_C1);
ptr_C2 = const_cast<void *>(args.ptr_C2);
ptr_D = args.ptr_D;
ptr_Vector = args.ptr_Vector;
ldr = args.ldr;
ptr_Tensor = args.ptr_Tensor;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C1 = args.batch_stride_C1;
batch_stride_C2 = args.batch_stride_C2;
batch_stride_Vector = args.batch_stride_Vector;
batch_stride_Tensor = args.batch_stride_Tensor;
this->batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::update()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
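  // The mainloop and epilogue allocations share storage via a union because their live ranges do
  // not overlap: by the time the epilogue stages accumulators through shared memory, the mainloop
  // no longer needs its shared-memory tiles.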
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
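  // Typical host-side usage (illustrative only):
  //
  //   cutlass::Status status = GemmKernel::can_implement(args);
  //   if (status != cutlass::Status::kSuccess) {
  //     // report the misalignment or fall back to a kernel with smaller alignment requirements
  //   }
  //
  // where 'GemmKernel' is an instantiation of GemmWithFusedEpilogue and 'args' its Arguments.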
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithFusedEpilogue op;
op(params, shared_storage);
}
#define SPLIT_K_ENABLED 1
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
#if SPLIT_K_ENABLED
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
#endif
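    // Example: with problem_size.k() = 4096, grid_tiled_shape.k() = 4 and gemm_k_size = 1024, the
    // threadblock at k-index 2 computes offset_k = 2048 and problem_size_k = 3072, so its mainloop
    // covers the K-range [2048, 3072).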
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1);
ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
//
// Fetch pointers based on mode.
//
//
// Special path when split-K not enabled.
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) {
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C2(
params.params_C2,
ptr_C2,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
ptr_Vector,
iterator_D,
accumulators,
iterator_C1,
iterator_C2,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
return;
}
//
// Slower path when split-K or batching is needed
//
#if SPLIT_K_ENABLED
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C1;
if (ptr_C2) {
ptr_C2 += threadblock_tile_offset.k() * params.batch_stride_C2;
}
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
if (ptr_Tensor) {
ptr_Tensor = ReferenceFactory<typename Epilogue::ElementTensor>::add_pointer_offset(
ptr_Tensor,
threadblock_tile_offset.k() * params.batch_stride_Tensor);
}
if (ptr_Vector) {
ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector;
}
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C1 = static_cast<ElementC * const *>(params.ptr_C1)[threadblock_tile_offset.k()];
if (ptr_C2) {
ptr_C2 = static_cast<ElementC * const *>(params.ptr_C2)[threadblock_tile_offset.k()];
}
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
if (ptr_Tensor) {
ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[threadblock_tile_offset.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()];
}
}
#endif
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C2(
params.params_C2,
ptr_C2,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
#if SPLIT_K_ENABLED
// Wait on the semaphore - this latency may have been covered by iterator construction
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C1 = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
#endif
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C1,
iterator_C2,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
#if SPLIT_K_ENABLED
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
#endif
}
};
// GemmWithFusedEpilogue with one source
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, true> {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value
);
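  // Worked example (illustrative, not part of the original source): for
  // ElementA = ElementB = half_t (16 bits), const_max(128/16, 128/16) == 8,
  // so each split along K covers a multiple of 8 elements to preserve 128b-aligned accesses.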
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
void * ptr_Vector;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
typename LayoutC::Stride::Index ldr;
typename LayoutC::Stride::Index ldt;
//
// Methods
//
    Arguments():
      ptr_A(nullptr),
      ptr_B(nullptr),
      ptr_C(nullptr),
      ptr_D(nullptr),
      ptr_Vector(nullptr),
      ptr_Tensor(nullptr)
    {}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void * ptr_Vector,
void * ptr_Tensor,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_Vector,
int64_t batch_stride_Tensor,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
typename LayoutC::Stride::Index ldr,
typename LayoutC::Stride::Index ldt)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_Vector(batch_stride_Vector),
batch_stride_Tensor(batch_stride_Tensor),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
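  // Host-side usage sketch (illustrative only; the pointer and stride names are hypothetical,
  // not part of this header). The argument order follows the constructor above:
  //
  //   GemmWithFusedEpilogue::Arguments args(
  //     GemmUniversalMode::kGemm,
  //     {M, N, K},                  // problem_size
  //     /*batch_count=*/1,
  //     epilogue_params,
  //     d_A, d_B, d_C, d_D,
  //     d_broadcast_vector,         // ptr_Vector
  //     d_aux_tensor,               // ptr_Tensor
  //     batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D,
  //     batch_stride_Vector, batch_stride_Tensor,
  //     lda, ldb, ldc, ldd, ldr, ldt);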
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::TensorTileIterator::Params params_Tensor;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
void * ptr_Vector;
typename LayoutC::Stride::Index ldr;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
params_Tensor(args.ldt),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_Vector(args.batch_stride_Vector),
batch_stride_Tensor(args.batch_stride_Tensor)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::Params()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
}
/// Lightweight update given a subset of arguments.
CUTLASS_HOST_DEVICE
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_Vector = args.ptr_Vector;
ldr = args.ldr;
ptr_Tensor = args.ptr_Tensor;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_Vector = args.batch_stride_Vector;
batch_stride_Tensor = args.batch_stride_Tensor;
this->batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::update()");
CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
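  // Illustrative alignment example (values are assumptions, not prescribed by this header):
  // if Epilogue::OutputTileIterator::kElementsPerAccess == 8 and LayoutC is RowMajor, a
  // problem with n() == 1000 passes the check above (1000 % 8 == 0), while n() == 1001
  // returns kErrorMisalignedOperand.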
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithFusedEpilogue op;
op(params, shared_storage);
}
#define SPLIT_K_ENABLED 1
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
#if SPLIT_K_ENABLED
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
#endif
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of threadblock-scoped mainloop iterations
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
//
// Fetch pointers based on mode.
//
//
// Special path when split-K not enabled.
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) {
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
ptr_Vector,
iterator_D,
accumulators,
iterator_C,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
return;
}
//
// Slower path when split-K or batching is needed
//
#if SPLIT_K_ENABLED
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
if (ptr_Tensor) {
ptr_Tensor = ReferenceFactory<typename Epilogue::ElementTensor>::add_pointer_offset(
ptr_Tensor,
threadblock_tile_offset.k() * params.batch_stride_Tensor);
}
if (ptr_Vector) {
ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector;
}
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
if (ptr_Tensor) {
ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[threadblock_tile_offset.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()];
}
}
#endif
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
#if SPLIT_K_ENABLED
// Wait on the semaphore - this latency may have been covered by iterator construction
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
#endif
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
#if SPLIT_K_ENABLED
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h",
"repo_id": "include",
"token_count": 19707
} | 28 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
#include "cutlass/trace.h"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelTmaWarpSpecializedCooperative, typename CollectiveMainloop_::DispatchPolicy::Schedule>>>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
using TileSchedulerTag = TileScheduler_;
using TileScheduler = typename detail::TileSchedulerSelector<
TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler;
using TileSchedulerArguments = typename TileScheduler::Arguments;
using TileSchedulerParams = typename TileScheduler::Params;
static constexpr uint32_t NumLoadWarpGroups = 1;
static constexpr uint32_t NumMmaWarpGroups = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup);
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
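  // Worked example (illustrative): the cooperative schedule uses a 256-thread TiledMma
  // (see the static_assert in operator()), so NumMmaWarpGroups = 256 / 128 = 2 and
  // MaxThreadsPerBlock = 256 + 1 * 128 = 384 threads per CTA.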
/// Register requirement for Load and Math WGs
static constexpr uint32_t LoadRegisterRequirement = 40;
static constexpr uint32_t MmaRegisterRequirement = 232;
  // One-stage ordered sequence between the mainloop and epilogue producer load threads
using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>;
// Kernel level shared memory storage
struct SharedStorage {
struct TensorStorage : cute::aligned_struct<128> {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order;
} pipelines;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerParams scheduler{};
void* workspace{nullptr};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
CUTLASS_TRACE_HOST("to_underlying_arguments():");
auto problem_shape = args.problem_shape;
if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) {
// swap M/N
get<0>(problem_shape) = get<1>(args.problem_shape);
get<1>(problem_shape) = get<0>(args.problem_shape);
}
auto problem_shape_MNKL = append<4>(problem_shape, 1);
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count);
KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count};
// Calculate workspace pointers
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
void* scheduler_workspace = workspace_ptr;
workspace_offset += TileScheduler::template get_workspace_size<ProblemShape, ElementAccumulator>(
args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
void* epilogue_workspace = workspace_ptr + workspace_offset;
workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
void* mainloop_workspace = nullptr;
    // Precompute the number of epilogue subtiles and pass it to the tile scheduler. It is
    // used by the separate-reduction scheme in the stream-K case. NumEpilogueSubTiles
    // defaults to 1, meaning subtiling is not used and separate reduction stays disabled.
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
TileSchedulerParams scheduler = TileScheduler::to_underlying_arguments(
problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles);
return {
args.mode,
problem_shape,
CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, mainloop_workspace),
CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, epilogue_workspace),
hw_info,
scheduler,
workspace
};
}
CUTLASS_HOST_DEVICE static
bool
can_implement(Arguments const& args) {
bool implementable = (args.mode == GemmUniversalMode::kGemm) or
(args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4);
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_size = 0;
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
workspace_size += TileScheduler::template get_workspace_size<ProblemShape, ElementAccumulator>(
args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
static cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
status = TileScheduler::template initialize_workspace<ProblemShape, ElementAccumulator>(
args.scheduler, workspace_ptr + workspace_offset, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_offset += TileScheduler::template get_workspace_size<ProblemShape, ElementAccumulator>(
args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = CollectiveEpilogue::initialize_workspace(args.problem_shape, args.epilogue, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
// Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently
TileSchedulerArguments args{};
if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) {
args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_;
}
args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM;
return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args);
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
// Preconditions
static_assert(size(TiledMma{}) == 256, "Cooperative kernel must have TiledMMA operating using 256 threads.");
static_assert(size<0>(TileShape{}) >= 128,
"Cooperative kernel requires Tile Size to be greater than or equal to 128 along the M-dimension.");
static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
/* In the Cooperative kernel, Consumer0 and Consumer1 collaborate on the same tile */
enum class WarpGroupRole {
Producer = 0,
Consumer0 = 1,
Consumer1 = 2
};
enum class ProducerWarpRole {
Mainloop = 0,
Warp1 = 1,
Epilogue = 2,
Warp3 = 3
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int lane_idx = canonical_lane_idx();
int warp_idx = canonical_warp_idx_sync();
int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup;
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int mma_thread_idx = thread_idx % size(TiledMma{});
auto warp_group_role = WarpGroupRole(canonical_warp_group_idx());
auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group);
int lane_predicate = cute::elect_one_sync();
uint32_t block_rank_in_cluster = cute::block_rank_in_cluster();
// Issue Tma Descriptor Prefetch from a single thread
if ((warp_idx == 0) && lane_predicate) {
CollectiveMainloop::prefetch_tma_descriptors(params.mainloop);
CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue);
}
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0;
mainloop_pipeline_params.num_consumers = size(TiledMma{});
mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{});
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster();
epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp;
epi_load_pipeline_params.consumer_arv_count = size(TiledMma{});
epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes;
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
typename LoadWarpOrderBarrier::Params params_load_order_barrier;
params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 0 : 1;
params_load_order_barrier.group_size = NumThreadsPerWarp;
LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
auto cluster_wait_fn = [] () {
      // We need this to guarantee that the pipeline init is visible
      // to all producer and consumer thread blocks in the cluster
if constexpr (size(ClusterShape{}) > 1) {
cute::cluster_arrive_relaxed();
return [] () { cute::cluster_wait(); };
}
else {
__syncthreads();
return [] () {}; // do nothing
}
} ();
// Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{});
// Get the appropriate blocks for this thread block -- potential for thread block locality
TiledMma tiled_mma;
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
TileScheduler scheduler{params.scheduler};
auto work_tile_info = scheduler.get_current_work();
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue);
// Prepare and partition the input tensors. Expects a tuple of tensors where:
// get<0>(load_inputs) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l)
// get<1>(load_inputs) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l)
auto load_inputs = collective_mainloop.load_init(problem_shape_MNKL, params.mainloop);
static_assert(cute::tuple_size_v<decltype(load_inputs)> >= 2, "Output of load_init must have at least two elements (A, B)");
// Extract out partitioned A and B.
Tensor gA_mkl = get<0>(load_inputs);
Tensor gB_nkl = get<1>(load_inputs);
// Get pipeline stage increments from tensor shapes
auto k_tile_count = size<3>(gA_mkl);
// Wait for all thread blocks in the Cluster
cluster_wait_fn();
if (warp_group_role == WarpGroupRole::Producer) {
cutlass::arch::warpgroup_reg_dealloc<LoadRegisterRequirement>();
// Mainloop Producer Warp
if (producer_warp_role == ProducerWarpRole::Mainloop) {
bool do_load_order_arrive = true;
while (work_tile_info.is_valid()) {
if (!TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) {
work_tile_info = fetch_next_work(work_tile_info, scheduler);
continue;
}
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
// Get the number of K tiles to compute for this work as well as the starting K tile offset of the work.
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info);
auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<3>(gA_mkl)), shape<3>(gA_mkl));
collective_mainloop.load(
params.mainloop,
mainloop_pipeline,
mainloop_pipe_producer_state,
load_inputs,
blk_coord,
k_tile_iter, work_k_tile_count,
lane_idx,
block_rank_in_cluster,
shared_storage.tensors.mainloop
);
// Update starting pipeline state for the next tile
mainloop_pipe_producer_state.advance(work_k_tile_count);
// Signal for the epilogue load warp to begin
if (do_load_order_arrive) {
load_order_barrier.arrive();
do_load_order_arrive = false;
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
} // Mainloop Producer Warp End
// Epilogue Producer Warp
else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) {
while (work_tile_info.is_valid()) {
if (!TileScheduler::requires_separate_reduction(params.scheduler)) {
load_order_barrier.wait();
}
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
epi_load_pipe_producer_state =
collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
lane_idx,
shared_storage.tensors.epilogue,
work_tile_info.reduction_subtile_idx()
);
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
} // Epilogue Producer Warp End
} // Producer Warp Group End
else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
cutlass::arch::warpgroup_reg_alloc<MmaRegisterRequirement>();
// Do we potentially issue tail arrives for TMA stores, if epilogue load is waiting for it
bool do_store_tail = false;
while (work_tile_info.is_valid()) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
// Allocate the accumulators for the (M,N) blk_shape
//
// MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead.
auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
if(TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) {
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
work_k_tile_count,
mma_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
work_k_tile_count
);
// Update starting mainloop pipeline state for the next tile
mainloop_pipe_consumer_state.advance(work_k_tile_count);
}
// Index of warp group within consumer warp groups
int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups;
// Perform reduction across splits, if needed
TileScheduler::fixup(
params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
// Epilogue and write to gD
auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] =
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
mma_thread_idx,
shared_storage.tensors.epilogue,
work_tile_info.reduction_subtile_idx()
);
epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next;
epi_store_pipe_producer_state = epi_store_pipe_producer_state_next;
do_store_tail = true;
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
} // Scheduler work fetch loop
if (do_store_tail) {
collective_epilogue.store_tail(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state
);
}
} // Consumer Warp Groups End
#endif
}
private:
// Kernel helper function to get next work unit
CUTLASS_DEVICE
typename TileScheduler::WorkTileInfo
fetch_next_work(
typename TileScheduler::WorkTileInfo& work_tile_info,
TileScheduler& scheduler) const {
// Check whether we should continue on with the current work unit. If this is the case,
// the work unit will have been updated in continue_current_work to reflect the new
// tile to be computed.
if (scheduler.continue_current_work(work_tile_info)) {
return work_tile_info;
}
// Get next work tile
scheduler.advance_to_next_work();
return scheduler.get_current_work();
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp",
"repo_id": "include",
"token_count": 10928
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Operator describing the tensor operation
typename Operator_ = arch::OpMultiplyAdd,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false>
struct DefaultMmaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for m-by-n-by-kgroup
template <
    /// Shape of the warp-level GEMM operation (concept: GemmShape)
    typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Operator describing the tensor operation
typename Operator_,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp {
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<InstructionShape_, 32, ElementA,
cutlass::layout::RowMajor, ElementB,
cutlass::layout::ColumnMajor, ElementC,
cutlass::layout::RowMajor, Operator_>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
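/// Example instantiation (illustrative; the shapes and types below are assumptions, not
/// prescribed by this header, and require an architecture that provides the chosen MMA):
///
///   using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp<
///     cutlass::gemm::GemmShape<64, 64, 16>,           // WarpShape
///     cutlass::gemm::GemmShape<16, 8, 8>,             // InstructionShape
///     cutlass::half_t, cutlass::layout::RowMajor,     // A
///     cutlass::half_t, cutlass::layout::ColumnMajor,  // B
///     float, cutlass::layout::RowMajor                // C
///   >::Type;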
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/gemm/warp/default_mma_tensor_op_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/default_mma_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/default_mma_tensor_op.h",
"repo_id": "include",
"token_count": 1485
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate
operations targeting sparse Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the warp-level matrix product targeting sparse Tensor Cores.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class SparseMmaTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
  /// Equivalent base dense mma
using Base = MmaTensorOp<Shape, ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, Policy, PartitionsK_,
AccumulatorsInRowMajor, Enable>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Base::ArchMmaOperator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename Base::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = typename Base::OperatorClass;
/// Shape of underlying instruction
using InstructionShape = typename Base::InstructionShape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Base::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Base::kTransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// Sparsity in Operand A
static int const kSparse = Policy::Operator::kSparse;
/// Meta data size in bits
static int const kMetaSizeInBits = Policy::Operator::kMetaSizeInBits;
/// Max ID2
static int const kMaxID2 = Policy::Operator::kMaxID2;
static int const kVerticalVisit = false;
/// Data type of meta E that is moved at the same time
using ElementE =
typename cutlass::platform::conditional<kMaxID2 == 1, uint32_t,
uint16_t>::type;
/// Number of ElementA that is associated with one ElementE
static int const kElementsPerElementE =
128 / cutlass::sizeof_bits<ElementA>::value;
/// Meta data is essentially interleaved but mapped to ColumnMajor internally
static int const kInterleaved = 2;
/// Layout of meta E
using LayoutE = cutlass::layout::ColumnMajor;
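  // Worked example (informative comment): for ElementA = half_t (16 bits),
  // kElementsPerElementE = 128 / 16 = 8, i.e. one metadata element of type ElementE
  // is associated with 8 logical elements of operand A. With kMaxID2 == 1 the
  // metadata is moved as uint32_t, otherwise as uint16_t.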
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, Operand::kA, ElementA,
LayoutA,
MatrixShape<Policy::Operator::Shape::kM,
Policy::Operator::Shape::kK / kSparse>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename Policy::Operator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = typename Base::IteratorB;
/// Storage for B tile
using FragmentB = typename Base::FragmentB;
/// Storage for transformed B tile
using TransformedFragmentB = typename Base::TransformedFragmentB;
/// Iterates over the C operand in memory
using IteratorC = typename Base::IteratorC;
/// Storage for C tile
using FragmentC = typename Base::FragmentC;
/// Iterates over the E operand in memory
using IteratorE = SparseMmaTensorOpMetaTileIterator<
MatrixShape<Shape::kM * kInterleaved,
Shape::kK / kSparse / kElementsPerElementE / kInterleaved>,
ElementE, LayoutE,
MatrixShape<Policy::Operator::Shape::kM,
Policy::Operator::Shape::kK / kSparse / kElementsPerElementE /
kInterleaved>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for E tile
using FragmentE = typename IteratorE::Fragment;
/// Number of mma operations performed
using MmaIterations = typename Base::MmaIterations;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
SparseMmaTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C,
FragmentE const &E
) const {
using MmaOperandA = typename Policy::Operator::FragmentA;
using MmaOperandB = typename Policy::Operator::FragmentB;
using MmaOperandC = typename Policy::Operator::FragmentC;
using MmaOperandE = typename Policy::Operator::FragmentE;
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
MmaOperandE const *ptr_E = reinterpret_cast<MmaOperandE const *>(&E);
if (kVerticalVisit) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
int m_serpentine = ((n % 2) ? (MmaIterations::kRow - 1 - m) : m);
int id2 = m_serpentine % kMaxID2;
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n + m_serpentine * MmaIterations::kColumn],
ptr_A[m_serpentine],
ptr_B[n],
ptr_D[n + m_serpentine * MmaIterations::kColumn],
ptr_E[(m_serpentine / kMaxID2)],
id2);
} else {
mma(
ptr_D[m_serpentine + n * MmaIterations::kRow],
ptr_A[m_serpentine],
ptr_B[n],
ptr_D[m_serpentine + n * MmaIterations::kRow],
ptr_E[(m_serpentine / kMaxID2)],
id2);
}
}
}
} else {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
int id2 = m % kMaxID2;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_E[(m / kMaxID2)],
id2);
} else {
mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_E[(m / kMaxID2)],
id2);
}
}
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
FloatRoundStyle const kRoundA =
PreferredRoundingMode<typename ArchMmaOperator::ElementA,
ElementA>::kRound;
FloatRoundStyle const kRoundB =
PreferredRoundingMode<typename ArchMmaOperator::ElementB,
ElementB>::kRound;
if (kVerticalVisit) {
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements / 2, kRoundB>
convert_B;
Array<ElementB, FragmentB::kElements / 2> const *ptr_B =
reinterpret_cast<Array<ElementB, FragmentB::kElements / 2> const *>(&B);
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> *
ptr_dst_B = reinterpret_cast<Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements / 2> *>(&dst_B);
dst_A = convert_A(A);
ptr_dst_B[0] = convert_B(ptr_B[0]);
ptr_dst_B[1] = convert_B(ptr_B[1]);
} else {
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements / 2, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements, kRoundB>
convert_B;
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *
ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements / 2> *>(&dst_A);
dst_B = convert_B(B);
ptr_dst_A[0] = convert_A(ptr_A[0]);
ptr_dst_A[1] = convert_A(ptr_A[1]);
}
}
};
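// Usage sketch (illustrative only, not part of the original header): inside a warp
// mainloop the sparse tensor op is invoked on transformed operand fragments together
// with the metadata fragment E. The type arguments and names below are placeholders.
//
//   using SparseMma = cutlass::gemm::warp::SparseMmaTensorOp<
//       Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Policy>;
//
//   SparseMma mma;
//   typename SparseMma::TransformedFragmentA frag_A;   // produced by mma.transform()
//   typename SparseMma::TransformedFragmentB frag_B;
//   typename SparseMma::FragmentE            frag_E;   // loaded by IteratorE
//   typename SparseMma::FragmentC            accum;
//   accum.clear();
//   mma(accum, frag_A, frag_B, accum, frag_E);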
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_sparse_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_sparse_tensor_op.h",
"repo_id": "include",
"token_count": 5205
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/array_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileIterator_>
class TileIteratorPlanarComplex {
public:
/// Underlying iterator over real-valued tiles
using TileIterator = TileIterator_;
/// Underlying element type
using Element = typename TileIterator::Element;
/// Underlying layout type
using Layout = typename TileIterator::Layout;
/// TensorRef type for loading element from a tensor
using TensorRef = typename TileIterator::TensorRef;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Planar complex fragment
using Fragment = ArrayPlanarComplex<Element, TileIterator::Fragment::kElements>;
public:
/// Underlying tile iterator
TileIterator tile_iterator_;
/// Offset (in units of bytes) to the imaginary part of the planar complex matrix
LongIndex imaginary_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
TileIteratorPlanarComplex(): imaginary_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorPlanarComplex(
TensorRef const &ref,
int lane_id,
LongIndex imaginary_offset
):
tile_iterator_(ref, lane_id),
imaginary_offset_((imaginary_offset * sizeof_bits<Element>::value) / 8) { }
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
TileIteratorPlanarComplex &add_pointer_offset(LongIndex offset) {
tile_iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
TileIteratorPlanarComplex &add_tile_offset(TensorCoord const &tile_offset) {
tile_iterator_.add_tile_offset(tile_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
TileIteratorPlanarComplex & operator++() {
++tile_iterator_;
return *this;
}
//
// WIP
//
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
TileIteratorPlanarComplex & operator--() {
--tile_iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
TileIteratorPlanarComplex & operator+=(TensorCoord const &tile_offset) {
tile_iterator_.add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
TileIteratorPlanarComplex & operator-=(TensorCoord const &tile_offset) {
tile_iterator_.add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
tile_iterator_.load_with_byte_offset(frag.real, 0);
tile_iterator_.load_with_byte_offset(frag.imag, imaginary_offset_);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
tile_iterator_.load_with_byte_offset(frag.real, byte_offset);
tile_iterator_.load_with_byte_offset(frag.imag, byte_offset + imaginary_offset_);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
Index byte_offset = (pointer_offset * sizeof_bits<Element>::value)/8;
tile_iterator_.load_with_byte_offset(frag.real, byte_offset);
tile_iterator_.load_with_byte_offset(frag.imag, byte_offset + imaginary_offset_);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
tile_iterator_.load_with_byte_offset(frag.real, tile_offset, 0);
tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, imaginary_offset_);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
Index byte_offset = (pointer_offset * sizeof_bits<Element>::value)/8;
tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset);
    tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, byte_offset + imaginary_offset_);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset);
tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, byte_offset + imaginary_offset_);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
tile_iterator_.set_kgroup_index(k_group);
}
};
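// Usage sketch (illustrative only, not part of the original header): the wrapper is
// constructed from a TensorRef to the real-valued plane plus the element offset of
// the imaginary plane, and fills both halves of the planar complex fragment in one
// call. Names below are placeholders.
//
//   using Iterator = cutlass::gemm::warp::TileIteratorPlanarComplex<UnderlyingIterator>;
//   Iterator iter(ref_real, lane_id, imaginary_offset_in_elements);
//   typename Iterator::Fragment frag;
//   iter.load(frag);      // populates frag.real and frag.imag
//   ++iter;               // advances the underlying iterator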
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/tile_iterator_planar_complex.h/0 | {
"file_path": "include/cutlass/gemm/warp/tile_iterator_planar_complex.h",
"repo_id": "include",
"token_count": 2596
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor reduction operator on layouts which are affine
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (e.g. ND => 2)
typename ElementOutput_,
typename ElementSource_,
typename ReductionOp_,
int VectorLength = 1,
typename ElementCompute_ = ElementOutput_,
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineContiguous {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ReductionOp = ReductionOp_;
using ElementCompute = ElementCompute_;
//
// Data members
//
/// Internal status field
Status status;
/// Extent of tensor in source layout
Coord<kRank> extent;
/// Number of points in the outer index space
int64_t outer_count;
/// Number of elements in the inner index space
int64_t inner_count;
/// Number of workspaces needed
int workspace_count;
/// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner)
dim3 grid_shape;
/// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner)
dim3 threadblock_shape;
/// CUDA grid shape for the final reduction step if needed
dim3 grid_final;
/// CUDA threadblock shape for the final reduction step if needed
dim3 threadblock_final;
private:
//
// Methods
//
  /// Halves 'count' until it is less than 2 x 'ext' and returns the power-of-two factor removed
static int reshape_pow2(int ext, int count) {
if (ext > count) {
return 1;
}
int x = 1;
for (; count >= ext * 2; ) {
count >>= 1;
x <<= 1;
}
return x;
}
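  // Worked example (informative comment): reshape_pow2(/*ext=*/48, /*count=*/256)
  // halves count while it is at least 2 * ext (256 -> 128 -> 64) and returns the
  // accumulated power-of-two factor x = 4; note that the factor, not the reduced
  // count, is the return value.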
public:
/// Default ctor
TensorReductionAffineContiguous():
status(Status::kErrorInvalidProblem),
extent(),
outer_count(0),
inner_count(0),
workspace_count(0),
grid_shape(0, 0, 0),
threadblock_shape(0, 0, 0) { }
/// Constructor
TensorReductionAffineContiguous(
Coord<kRank> extent_,
int target_threadblock_count = 128
):
status(Status::kSuccess),
extent(extent_),
outer_count(0),
inner_count(0),
workspace_count(0) {
//
// Plan the parallel mapping strategy.
//
outer_count = 1;
inner_count = 1;
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank; ++p) {
outer_count *= extent[p];
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= extent[kReducedRank + p];
}
int cta_count_x = 1;
int cta_count_y = 1;
int cta_count_z = 1;
int cta_threads_x = kThreads;
int cta_threads_y = 1;
int cta_threads_z = 1;
// Determine CTA shape
int64_t inner_vector_count = inner_count / kVectorLength;
// Priority 1. Assign threadblocks to outer indices if possible
if (outer_count > target_threadblock_count) {
cta_count_x = 1;
cta_count_y = target_threadblock_count;
cta_count_z = 1;
}
else {
cta_count_y = int(outer_count);
int remaining_ctas = target_threadblock_count / cta_count_y;
// Priority 2. Assign inner dimensions to one CTA
if (inner_vector_count > cta_threads_x) {
int64_t cta_z_bound = inner_vector_count / cta_threads_x;
if (cta_z_bound > remaining_ctas) {
cta_count_z = remaining_ctas;
}
else {
cta_count_z = int(cta_z_bound);
}
}
else {
cta_threads_x = reshape_pow2(int(inner_vector_count), cta_threads_x);
cta_count_z = 1;
}
}
grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z);
threadblock_shape = dim3(cta_threads_x, cta_threads_y, cta_threads_z);
workspace_count = (cta_count_z > 1 ? cta_count_z : 0);
// Determine shape of final reduction kernel if needed
if (workspace_count) {
int final_threads = kThreads;
int final_ctas = 1;
if (outer_count > kThreads) {
final_ctas = int(outer_count + kThreads - 1) / kThreads;
}
else {
final_threads = int(outer_count);
}
grid_final = dim3(final_ctas, 1, 1);
threadblock_final = dim3(final_threads, 1, 1);
}
else {
grid_final = dim3(0, 0, 0);
threadblock_final = dim3(0, 0, 0);
}
}
/// Simple check to verify the object is initialized correctly
bool good() const {
return status == Status::kSuccess;
}
/// Size (in bytes) of <outer_count> workspace elements which are densely packed together
int64_t workspace_stride() const {
// Error condition
if (!good()) {
return 0;
}
return outer_count * sizeof_bits<ElementCompute>::value / 8;
}
/// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs
int64_t workspace_size() const {
// Error condition
if (!good()) {
return 0;
}
// No reduction across CTAs
if (grid_shape.z == 1) {
return 0;
}
return workspace_stride() * grid_shape.z;
}
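  // Worked example (informative comment): with ElementCompute = float and
  // outer_count = 1024, workspace_stride() is 1024 * 4 = 4096 bytes; if the grid
  // launches grid_shape.z = 8 partial-reduction CTAs, workspace_size() is
  // 8 * 4096 = 32768 bytes of temporary device storage.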
/// Performs a reduction
Status reduce(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Device workspace
ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity element
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
// Initial status check
if (!good()) {
return status;
}
// Guard against null workspace
if (workspace_count > 1 && device_workspace_ptr == nullptr) {
return Status::kErrorWorkspaceNull;
}
// Define reduction kernel
using ReductionKernel = kernel::TensorReductionAffineContiguous<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using FinalReductionKernel = kernel::TensorReductionAffineContiguousFinal<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using Params = typename ReductionKernel::Params;
// Construct the parameters
Params params(
extent,
dst_ptr,
dst_stride,
src_ptr,
src_stride,
static_cast<ElementCompute *>(device_workspace_ptr),
workspace_stride(),
workspace_count,
reduction_op,
reduction_identity);
// Shared memory size
int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage);
// Launch the kernel
Kernel<ReductionKernel><<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params);
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
// Final reduction kernel
if (workspace_count) {
Kernel<FinalReductionKernel><<< grid_final, threadblock_final, 0, stream >>>(params);
}
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
return status;
}
/// Helper to use overloaded function call operator
Status operator()(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Pointer to device workspace
ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity element
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
return reduce(dst_ptr, dst_stride, src_ptr, src_stride, device_workspace_ptr, reduction_identity, reduction_op, stream);
}
};
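// Usage sketch (illustrative only, not part of the original header): reducing the
// two innermost (contiguous) ranks of a rank-4 tensor down to a rank-2 tensor.
// Pointers, strides, and extents are placeholders; dst_stride has
// kReducedRank - 1 = 1 entry and src_stride has kRank - 1 = 3 entries.
//
//   using Reduction = cutlass::reduction::device::TensorReductionAffineContiguous<
//       4,                      // Rank of the source tensor
//       2,                      // Rank of the reduced tensor
//       float,                  // ElementOutput
//       float,                  // ElementSource
//       cutlass::plus<float>    // ReductionOp
//   >;
//
//   Reduction reduction(extent);                       // extent is a Coord<4>
//   // allocate reduction.workspace_size() bytes of device memory if it is nonzero
//   reduction.reduce(dst_ptr, dst_stride, src_ptr, src_stride, workspace_ptr);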
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h/0 | {
"file_path": "include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h",
"repo_id": "include",
"token_count": 4226
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a structure containing strides and a pointer to tensor data.
TensorView is derived from TensorRef and contributes bounds to the tensor's index space. Thus,
it is a complete mathematical object and may be used in tensor algorithms. It is decoupled from
data storage and is therefore lightweight and may be embedded in larger tensor objects or
memory structures.
See cutlass/tensor_ref.h for more details about the mapping of the logical tensor index space to
linear memory.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cmath>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref_planar_complex.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Data type of element stored within tensor
typename Element_,
/// Maps a Coord<Rank_> in the logical tensor index space to the internal n-D array
typename Layout_
>
class TensorViewPlanarComplex : public TensorRefPlanarComplex<Element_, Layout_> {
public:
/// Base tensor reference
using Base = cutlass::TensorRefPlanarComplex<Element_, Layout_>;
/// Mapping function from logical coordinate to internal n-D array
using Layout = Layout_;
/// TensorRef pointing to constant memory
using ConstTensorRef = typename Base::ConstTensorRef;
/// Underlying TensorRef type
using TensorRef = Base;
/// Data type of individual access
using Element = Element_;
/// Reference type to an element
using Reference = Element &;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Coordinate in storage n-D array
using Stride = typename Layout::Stride;
/// TensorView pointing to constant memory
using ConstTensorView = TensorViewPlanarComplex<
typename platform::remove_const<Element>::type const,
Layout>;
/// TensorView pointing to non-constant memory
using NonConstTensorView = TensorViewPlanarComplex<
typename platform::remove_const<Element>::type,
Layout>;
/// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a
/// scalar, but degenerate cases such as these are difficult to accommodate without
/// extensive C++ metaprogramming or support for zero-length arrays.
static_assert(kRank > 0, "Cannot define a zero-rank TensorRef");
private:
/// View extent
TensorCoord extent_;
public:
//
// Methods
//
/// Constructs a TensorView object
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(TensorCoord const &extent = TensorCoord()): extent_(extent) {
}
/// Constructs a TensorView object
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(
Element *ptr, ///< pointer to start of tensor
Layout const &layout, ///< layout object containing stride and mapping function
LongIndex imaginary_stride, ///< stride between real and imaginary part
TensorCoord const &extent ///< size of the view in logical coordinates
):
Base(ptr, layout, imaginary_stride), extent_(extent) {
}
/// Constructs a TensorView object
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(
TensorRef const &ref, ///< pointer and layout object referencing a tensor
TensorCoord const &extent ///< logical size of tensor
):
Base(ref), extent_(extent) {
}
/// Converting constructor from TensorRef to non-constant data.
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(
NonConstTensorView const &view ///< TensorView to non-const data
):
Base(view), extent_(view.extent_) { }
/// Updates the pointer and layout object
CUTLASS_HOST_DEVICE
void reset(Element* ptr, Layout const &layout, LongIndex imaginary_stride, TensorCoord size) {
Base::reset(ptr, layout, imaginary_stride);
    this->resize(size);
}
/// Changes the size of the view without affecting pointer or layout
CUTLASS_HOST_DEVICE
void resize(TensorCoord extent) {
this->extent_ = extent;
}
/// Returns the extent of the view (the size along each logical dimension).
CUTLASS_HOST_DEVICE
TensorCoord const& extent() const { return extent_; }
/// Returns the extent along a particular logical dimension.
CUTLASS_HOST_DEVICE
Index extent(int dim) const { return extent_.at(dim); }
/// Determines whether a location is within a tensor
CUTLASS_HOST_DEVICE
bool contains(TensorCoord const& coord) const {
CUTLASS_PRAGMA_UNROLL
for (int dim = 0; dim < kRank; ++dim) {
if (!(coord[dim] >= 0 && coord[dim] < extent(dim))) {
return false;
}
}
return true;
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
Base ref() const {
return Base(this->data(), this->layout(), this->imaginary_stride());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(this->data(), this->layout());
}
/// Returns a TensorView to const data
CUTLASS_HOST_DEVICE
ConstTensorView const_view() const {
return ConstTensorView(const_ref(), extent_);
}
  /// Returns a TensorView given location and size quantities
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex subview(
TensorCoord extent, ///< extent of the resulting view
TensorCoord const& location = TensorCoord() ///< resulting view's origin within the old view
) const {
TensorViewPlanarComplex result(this->ref(), extent.clamp(extent_ - location));
result.add_coord_offset(location);
return result;
}
/// Returns the number of scalar elements needed to store tensor.
CUTLASS_HOST_DEVICE
size_t capacity() const {
return Base::layout().capacity(extent_);
}
/// Returns a TensorView offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex operator+(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) const {
TensorViewPlanarComplex result(*this);
result.add_pointer_offset(this->offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex& operator+=(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) {
this->add_pointer_offset(this->offset(b));
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex operator-(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) const {
    TensorViewPlanarComplex result(*this);
result.add_pointer_offset(-this->offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex& operator-=(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) {
this->add_pointer_offset(-this->offset(b));
return *this;
}
  /// TensorView of the real-valued part of the tensor
CUTLASS_HOST_DEVICE
cutlass::TensorView<Element, Layout> view_real() const {
return cutlass::TensorView<Element, Layout>(this->data(), this->layout(), extent_);
}
  /// TensorView of the imaginary-valued part of the tensor
CUTLASS_HOST_DEVICE
cutlass::TensorView<Element, Layout> view_imag() const {
return cutlass::TensorView<Element, Layout>(this->imaginary_data(), this->layout(), extent_);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs a TensorRef, deducing types from arguments.
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE TensorViewPlanarComplex<Element, Layout> make_TensorViewPlanarComplex(
Element *ptr,
Layout const &layout,
typename Layout::LongIndex imaginary_stride,
typename Layout::TensorCoord const &extent) {
return TensorViewPlanarComplex<Element, Layout>(ptr, layout, imaginary_stride, extent);
}
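// Usage sketch (illustrative only, not part of the original header): constructing a
// planar complex view over a column-major M-by-N matrix whose imaginary plane is
// stored 'imaginary_stride' elements after the real plane. All names below are
// placeholders.
//
//   cutlass::layout::ColumnMajor layout(ldm);
//   auto view = cutlass::make_TensorViewPlanarComplex(
//       ptr, layout, imaginary_stride, cutlass::make_Coord(M, N));
//   if (view.contains(cutlass::make_Coord(i, j))) {
//     auto real_part = view.view_real();
//     auto imag_part = view.view_imag();
//   }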
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/tensor_view_planar_complex.h/0 | {
"file_path": "include/cutlass/tensor_view_planar_complex.h",
"repo_id": "include",
"token_count": 3203
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last
"residue" tile first, with the objective of minimizing predicate mask updates
during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorTriangularMatrix
///
template <typename Shape, typename Element, typename Layout,
int AdvanceRank, typename ThreadMap,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType>
class PredicatedTileAccessIteratorTriangularMatrix;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, SideMode kSideMode, FillMode kFillMode, DiagType kDiagType, typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType, AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
using CompareOp = typename TrMatrixCompareOp<kFillMode, kDiagType>::Type;
static_assert( kFillMode == FillMode::kFull ||
((kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) && AccessType::kElements == 1),
"BLAS3 iterator for the triangular/symmetric matrix must use AccessType::kElements as 1");
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
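  // Worked example (informative comment): each access contributes one predicate bit.
  // With kPredicatesPerByte = 4 and kPredicatesPerWord = 16, a thread map producing
  // kPredicateCount = 8 accesses needs kPredicateByteCount = (8 + 3) / 4 = 2 bytes,
  // which fits in kPredicateWordCount = (2 + 3) / 4 = 1 storage word of predicates_.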
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorTriangularMatrix;
private:
/// stride of pitch-linear layout (units of Element)
StrideIndex stride_;
/// (true) pitch-linear layout is mapped to row-major matrix
/// (false) pitch-linear layout is mapped to column-major matrix
bool is_row_major_;
/// for vectorized access across the diagonal boundary guard condition is
/// checked for the element on the boundary
int access_diagonal_boundary_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0), is_row_major_(false), access_diagonal_boundary_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout, bool is_row_major, int access_diagonal_boundary) :
stride_(layout.stride(0)), is_row_major_(is_row_major), access_diagonal_boundary_(access_diagonal_boundary) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Track global memory addresses on the diagonal
/// To ignore imag part for diagonal elements of hermitian matrices
uint32_t predicates_onDiag_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
predicates_onDiag_[i] = 0u;
}
CompareOp compare_op;
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
bool onDiag = false;
guard = ((coord.strided() < extent.strided()) &&
(coord.contiguous() < extent.contiguous()));
      // guard access on the wrong side of the triangular matrix diagonal
      if (kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) {
        coord += TensorCoord{params_.access_diagonal_boundary_, 0};
        bool triangular_guard_row_major = compare_op(coord.strided(), coord.contiguous()) | !params_.is_row_major_;
        bool triangular_guard_col_major = compare_op(coord.contiguous(), coord.strided()) | params_.is_row_major_;
        guard = guard && triangular_guard_row_major && triangular_guard_col_major;
if (kDiagType == DiagType::kUnit) {
onDiag = (guard && coord.strided() == coord.contiguous()) ? true : false;
}
}
int pred_idx_onDiag = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx_onDiag = pred_idx_onDiag / kPredicatesPerWord;
int residual_onDiag = pred_idx_onDiag % kPredicatesPerWord;
int byte_idx_onDiag = residual_onDiag / kPredicatesPerByte;
int bit_idx_onDiag = residual_onDiag % kPredicatesPerByte;
predicates_onDiag_[word_idx_onDiag] |= (unsigned(onDiag) << (byte_idx_onDiag * 8 + bit_idx_onDiag));
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
extent_(extent) {
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(extent_);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
thread_offset_ += TensorCoord{0, Shape::kStrided * tile_offset.strided()};
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
thread_offset_ += TensorCoord{Shape::kContiguous * tile_offset.contiguous(), 0};
}
compute_predicates_(extent_);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Return if the address in on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_onDiag_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
//return true;
}
};
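// Usage sketch (illustrative only, not part of the original header): used directly,
// the access iterator walks every access of one tile and exposes one predicate per
// access. In practice it is wrapped by a tile iterator; all names below are
// placeholders.
//
//   using Iterator = PredicatedTileAccessIteratorTriangularMatrix<
//       Shape, Element, cutlass::layout::PitchLinear, 0, ThreadMap,
//       kSideMode, kFillMode, kDiagType, AccessType>;
//
//   typename Iterator::Params params(layout, /*is_row_major=*/false,
//                                    /*access_diagonal_boundary=*/0);
//   Iterator iter(params, pointer, extent, thread_id, threadblock_offset);
//   for (int idx = 0; idx < ThreadMap::Iterations::kCount * Iterator::kAccessesPerVector; ++idx) {
//     if (iter.valid()) {
//       AccessType const *access = iter.get();
//       // ... issue the load/store for this access ...
//     }
//     ++iter;
//   }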
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType,
AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kLower) ? (AccessType::kElements - 1) : 0;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0)), false, kAccessDiagonalBoundary){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the current access lies on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType, AccessType_> {
public:
static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kUpper) ? (AccessType::kElements - 1) : 0;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0)), true, kAccessDiagonalBoundary){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the current access lies on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h/0 | {
"file_path": "include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h",
"repo_id": "include",
"token_count": 10731
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template that wraps the vector access iterator concept to load a whole vector from
    tensors in memory. This is typically used for per-channel scale and bias in convolution kernels.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename VectorAccessIterator_>
class VectorIterator {
public:
using VectorAccessIterator = VectorAccessIterator_;
using Shape = typename VectorAccessIterator::Shape;
using Element = typename VectorAccessIterator::Element;
using Layout = typename VectorAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using AccessType = typename VectorAccessIterator::AccessType;
using TensorRef = typename VectorAccessIterator::TensorRef;
using Index = typename VectorAccessIterator::Index;
using LongIndex = typename VectorAccessIterator::LongIndex;
static int const kElementsPerAccess = VectorAccessIterator::kElementsPerAccess;
static int const kRowsPerIteration = VectorAccessIterator::kRowsPerIteration;
static int const kThreads = VectorAccessIterator::kThreads;
static int const kIterations = VectorAccessIterator::kIterations;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element, kElementsPerAccess * kIterations>;
private:
/// Internal state
VectorAccessIterator vector_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
VectorIterator(
Element const *ptr,
TensorCoord extent,
int thread_idx,
int warp_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
vector_access_iterator_(ptr, extent, thread_idx, warp_idx, threadblock_offset) { }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator &operator++() {
vector_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator operator++(int) {
VectorIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c],
vector_access_iterator_.get() + pointer_offset,
vector_access_iterator_.valid()
);
++vector_access_iterator_;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
vector_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
vector_access_iterator_.advance();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/vector_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/vector_iterator.h",
"repo_id": "include",
"token_count": 1488
} | 36 |
# Predication: What to do when tiling isn't perfect
The [GEMM tutorial](./0x_gemm_tutorial.md) shows how
we compute a matrix-matrix multiply
by iterating over tiles of the input matrices and output matrix.
The examples all assume that the tiles fit evenly into the matrices,
with no remainder.
What do we do if this is not the case?
For example, we might want to tile a 41 x 55 matrix into 4 x 8 tiles,
but 41 / 4 is 10 remainder 1, and 55 / 8 is 6 remainder 7.
What do we do with those "leftover" parts of the matrix?
Another way to say this is that `logical_divide`
(CuTe's way of tiling layouts) "rounds up."
For example, if `N` is the layout `1000:1` and `B` is the layout `128:1`,
then `logical_divide(N, B)` is the layout `(128,8):(1,128)`.
This effectively rounds up the original shape N = 1000
into a 128 x 8 matrix (as if N = 1024).
What about those last 24 elements,
that aren't part of the original data?
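To make the round-up concrete, here is a minimal sketch of that arithmetic in CuTe
(an illustration of the example above, not code taken from a CUTLASS kernel):
```c++
#include <cute/tensor.hpp>

void round_up_example() {
  using namespace cute;
  auto N  = make_layout(1000);            // 1000:1
  auto B  = make_layout(Int<128>{});      // 128:1
  auto NB = logical_divide(N, B);         // (128,8):(1,128), since ceil(1000/128) == 8
  // size(NB) == 1024, so the last 24 positions do not map to real data
  // and must be masked off by predication.
}
```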
The idiomatic CuTe way to solve this problem is through "predication."
Rather than trying to reason about the "remainder tiles,"
CuTe instead rounds up, but only tries to access data in each tile
that are part of the matrix.
This corresponds well with how our GPUs optimize:
branches without warp divergence are relatively fast.
It also matches the usual CUDA idiom
when dividing N work items in 1-D fashion over B thread blocks:
first test if "my thread" is out of bounds before doing work.
There are a few ways to figure out
which elements need to be predicated.
In-kernel GEMMs like to do this in the following way.
```c++
// Create the predicate tensor
Layout idA = make_layout(shape(A)); // e.g. 1000:1
Layout idAB = logical_divide(idA, B); // e.g. (128,8):(1,128)
Tensor pred = make_tensor<bool>(shape(idAB));
for (int i = 0; i < size(pred); ++i) {
pred(i) = idAB(i) < size(A);
}
// ... intervening code ...
// Use the predicate tensor. c is some coordinate.
// This code would likely live inside some algorithm.
if (pred(c)) { copy(idAB(c), smem(c)); }
```
The general procedure is that we
1. create an "identity" layout (`Layout idA = make_layout(shape(A))`,
in the above example) with the same shape as our original data;
2. repeat the same tiling/partitioning/slicing (possibly rounding up)
on that identity layout (`Layout idAB = logical_divide(idA, B)`);
3. create a "predicate tensor" by comparing the coordinates
of that reference layout with the bounds of the original layout;
and then
4. use the predicate tensor to mask off accesses to out-of-bounds elements.
For example, suppose that we've partitioned A and B tiles
across threads as follows.
```c++
Tensor tAgA = local_partition(gA, tA, thread_idx); // (THR_M,THR_K,k)
Tensor tAsA = local_partition(sA, tA, thread_idx); // (THR_M,THR_K,PIPE)
Tensor tBgB = local_partition(gB, tB, thread_idx); // (THR_N,THR_K,k)
Tensor tBsB = local_partition(sB, tB, thread_idx); // (THR_N,THR_K,PIPE)
```
`tAgA` and `tBgB` partition the global A resp. B matrices over threads,
and `tAsA` and `tBsB` partition the shared memory tiles of A resp. B over threads.
The following code creates predicate tensors
corresponding to `tAgA` and `tBgB`.
They will be computed once in the prologue
and will be used to mask off instructions in the inner loop.
```c++
Tensor tApA = make_tensor<bool>(make_shape (size<0>(tAgA), size<1>(tAgA)),
make_stride( Int<1>{}, Int<0>{}));
Tensor tBpB = make_tensor<bool>(make_shape (size<0>(tBgB), size<1>(tBgB)),
make_stride( Int<1>{}, Int<0>{}));
```
We're only thread-parallelizing over the leftmost (row) dimension,
so we only need to predicate over the leftmost dimension.
Thus, we can make the rightmost (column) stride zero,
since we will never actually address the rightmost dimension.
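As a small illustration of that stride-zero trick (a sketch with made-up extents,
not code from this tutorial), every column of such a predicate tensor aliases the
same storage, so only the row coordinate actually matters:
```c++
// 4 "rows" of predicates, logically repeated across 3 "columns" via stride 0.
Tensor tP = make_tensor<bool>(make_shape (Int<4>{}, Int<3>{}),
                              make_stride(Int<1>{}, Int<0>{}));
tP(2,0) = true;
// tP(2,1) and tP(2,2) read the very same element as tP(2,0), so predicating
// with any column index is equivalent to predicating on the row alone.
```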
The following code creates "two-dimensional identity tensors"
that map coordinates (m,k) -> (m,k)
for the tile of data within the thread block.
```c++
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
```
The following lines then tile and partition
the two reference tensors
in exactly the same way the data were tiled and partitioned
into `tAsA` and `tBsB`.
```c++
Tensor tAcA = local_partition(cA, tA, thread_idx);
Tensor tBcB = local_partition(cB, tB, thread_idx);
```
Tiling and partitioning affect the offset and domain,
but not the codomain of the tensors,
so we're left with tensors that map `(thr_m,thr_k) -> (m,k)`
where `(thr_m,thr_k)` is this particular thread's subtensor of the tile
and `(m,k)` is the original codomain: a coordinate into the original tile.
The unrolled loops in the code below then compare
the m- and n-coordinates of those tensors with our known maximums
to mask off elements we are not allowed to access.
```c++
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor tAcA = local_partition(cA, tA, thread_idx);
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
Tensor tBcB = local_partition(cB, tB, thread_idx);
// Populate
CUTE_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(m,0)) < m_max_coord;
}
CUTE_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(n,0)) < n_max_coord;
}
```
Those last `for` loops fill in the two predicate tensors.
In this case, we only need to predicate over the leftmost dimension,
so we only address `(m,0)` resp. `(n,0)`.
We can then use the predicate tensors in `copy_if`
to copy only the elements for which the corresponding
predicate tensor elements are nonzero.
```c++
// Prefetch k_tile=0, gate these on k_residue as well
CUTE_UNROLL
for (int k = 0; k < size<1>(tAsA); ++k) {
if (get<1>(tAcA(0,k)) >= -k_residue) { // some other condition on the column index
copy_if(tApA, tAgA(_,k,0), tAsA(_,k,0));
}
}
CUTE_UNROLL
for (int k = 0; k < size<1>(tBsB); ++k) {
if (get<1>(tBcB(0,k)) >= -k_residue) { // some other condition on the column index
copy_if(tBpB, tBgB(_,k,0), tBsB(_,k,0));
}
}
```
Here are some advantages of this "reference tensor" approach.
1. It doesn't depend on the layout/strides of the tensor
being predicated, just the logical bounds being imposed.
2. The partitioning stage can be anything.
3. It naturally extends to any-dimensional predication.
4. It's a natural generalization of a typical CUDA 1-D
parallel vector access pattern,
which computes an access index `k`
(e.g., as `blockDim.x * blockIdx.x + threadIdx.x`)
and then predicates access to the vector's `k`-th element
on whether `k` is in bounds.
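For reference, that familiar 1-D CUDA pattern looks like the following
(a generic sketch, not CUTLASS code):
```c++
__global__ void scale(float *x, int N, float alpha) {
  int k = blockDim.x * blockIdx.x + threadIdx.x;
  if (k < N) {            // predicate the access instead of shrinking the launch
    x[k] *= alpha;
  }
}
```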
As an example of (3), the epilogue predication does exactly the same thing,
```c++
// Repeat with a tensor of coordinates for predication
Tensor cC = make_identity_tensor(make_shape(size<0>(gC), size<1>(gC)));
Tensor tCcC = thr_mma.partition_C(cC);
const bool isBetaZero = (beta == 0);
CUTE_UNROLL
for (int i = 0; i < size(tCrC); ++i) {
if (elem_less(tCcC(i), make_coord(m_max_coord,n_max_coord))) {
tCgC(i) = isBetaZero ? alpha * tCrC(i) : alpha * tCrC(i) + beta * tCgC(i);
}
}
```
but with the mma responsible for the tiling/partitioning `tCcC`
so that the reference subtensor matches the accumulator's subtensor.
Then, the reference subtensor is predicated against the `if` bounds
(in both m- and n-coordinates) inside the `for` loop.
Another way to explain this is that we don't modify the tiles
to give you the "right" extents so that you never overrun.
Instead, we let you query the original coordinate
to see if that coordinate overruns.
This avoids all branching and variable/dynamic loop bounds
(thus maintaining load balance and synchronicity,
both very important in-kernel) in favor of predication.
It's also general enough to extend to all ranks,
all layouts of threads and data,
and all tiling/partitioning patterns.
| media/docs/cute/0y_predication.md/0 | {
"file_path": "media/docs/cute/0y_predication.md",
"repo_id": "media",
"token_count": 2994
} | 37 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Quick Start Guide")
[README](../../README.md#documentation) > **Quick Start**
# Quickstart
## Prerequisites
CUTLASS requires:
- NVIDIA CUDA Toolkit (11.4 or later required, [12.0](https://developer.nvidia.com/cuda-toolkit) recommended)
- CMake 3.18+
- host compiler supporting C++17 or greater (minimum g++ 7.5.0)
- Python 3.6+
CUTLASS may be optionally compiled and linked with
- cuBLAS
- cuDNN v7.6 or later
## Initial build steps
Construct a build directory and run CMake.
```bash
$ export CUDACXX=${CUDA_INSTALL_PATH}/bin/nvcc
$ mkdir build && cd build
$ cmake .. -DCUTLASS_NVCC_ARCHS=90a # compiles for NVIDIA Hopper GPU architecture
```
If your goal is strictly to build only the CUTLASS Profiler and to minimize compilation time, we suggest
executing the following CMake command in an empty `build/` directory.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_ENABLE_TESTS=OFF -DCUTLASS_UNITY_BUILD_ENABLED=ON
```
This reduces overall compilation time by excluding unit tests and enabling the unity build.
You may reduce build times by compiling only certain operations by setting the `CUTLASS_LIBRARY_OPERATIONS` flag as shown below,
executed from an empty `build/` directory. This only compiles 2-D convolution kernels.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_LIBRARY_OPERATIONS=conv2d
```
You may also filter kernels by name by supplying a filter string with flag `CUTLASS_LIBRARY_KERNELS`. For example, the below command selects only CUTLASS-3 kernels.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_LIBRARY_KERNELS=cutlass3x*
```
See more examples on selectively compiling CUTLASS GEMM and convolution kernels [here](quickstart.md#example-cmake-commands).
You may explicitly exclude cuBLAS and cuDNN as dependencies with the following CMake flags.
- `-DCUTLASS_ENABLE_CUBLAS=OFF`
- `-DCUTLASS_ENABLE_CUDNN=OFF`
## Build and run the CUTLASS Profiler
From the `build/` directory created above, compile the CUTLASS Profiler.
```bash
$ make cutlass_profiler -j12
```
Then, to execute the CUTLASS Profiler for GEMM, run the following command.
```bash
$ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=4352 --n=4096 --k=4096
=============================
Problem ID: 1
Provider: CUTLASS
Operation: cutlass_simt_sgemm_128x128_nn
Disposition: Passed
Status: Success
Arguments: --m=4352 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 \
--split_k_slices=1 --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 \
--stages=2 --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 \
--max_cc=1024
Bytes: 52428800 bytes
FLOPs: 146064539648 flops
Runtime: 10.5424 ms
Memory: 4.63158 GiB/s
Math: 13854.9 GFLOP/s
```
To execute the CUTLASS Profiler for convolution, run the following example.
```bash
$ ./tools/profiler/cutlass_profiler --kernels=s1688fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --pad_h=1 --pad_w=1
```
To execute all CUTLASS 2-D convolution operators, execute the following.
```bash
$ ./tools/profiler/cutlass_profiler --operation=conv2d --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3
=============================
Problem ID: 1
Provider: CUTLASS
OperationKind: conv2d
Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc
Status: Success
Verification: ON
Disposition: Passed
reference_device: Passed
Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \
--stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \
--conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \
--eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \
--warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024
Bytes: 2055798784 bytes
FLOPs: 118482796544 flops
Runtime: 8.13237 ms
Memory: 235.431 GiB/s
Math: 14569.3 GFLOP/s
```
See [documentation for the CUTLASS Profiler](profiler.md) for more details.
## Build and run CUTLASS Unit Tests
From the `build/` directory created above, simply build the target `test_unit` to compile and run
all unit tests.
```bash
$ make test_unit -j
...
...
...
[----------] Global test environment tear-down
[==========] 946 tests from 57 test cases ran. (10812 ms total)
[ PASSED ] 946 tests.
$
```
The exact number of tests run is subject to change as we add more functionality.
No tests should fail. Unit tests automatically construct the appropriate runtime filters
to avoid executing on architectures that do not support all features under test.
The unit tests are arranged hierarchically mirroring the CUTLASS Template Library. This enables
parallelism in building and running tests as well as reducing compilation times when a specific
set of tests are desired.
For example, the following executes strictly the warp-level GEMM tests.
```bash
$ make test_unit_gemm_warp -j
...
...
[----------] 3 tests from SM75_warp_gemm_tensor_op_congruous_f16
[ RUN ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x8_32x128x8_16x8x8
[ OK ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x8_32x128x8_16x8x8 (0 ms)
[ RUN ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_64x64x32_16x8x8
[ OK ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_64x64x32_16x8x8 (2 ms)
[ RUN ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_32x32x32_16x8x8
[ OK ] SM75_warp_gemm_tensor_op_congruous_f16.128x128x32_32x32x32_16x8x8 (1 ms)
[----------] 3 tests from SM75_warp_gemm_tensor_op_congruous_f16 (3 ms total)
...
...
[----------] Global test environment tear-down
[==========] 104 tests from 32 test cases ran. (294 ms total)
[ PASSED ] 104 tests.
[100%] Built target test_unit_gemm_warp
```
## Building for Multiple Architectures
To minimize compilation time, specific GPU architectures can be enabled via the CMake command,
selected by [CUDA Compute Capability.](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities)
**NVIDIA Hopper Architecture.**
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=90a              # compiles for NVIDIA Hopper GPU architecture
```
**NVIDIA Ampere Architecture.**
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=80 # compiles for NVIDIA Ampere GPU architecture
```
**NVIDIA Turing Architecture.**
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=75 # compiles for NVIDIA Turing GPU architecture
```
**NVIDIA Volta Architecture.**
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=70 # compiles for NVIDIA Volta GPU architecture
```
**NVIDIA Pascal Architecture.**
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS="60;61" # compiles for NVIDIA Pascal GPU architecture
```
**NVIDIA Maxwell Architecture.**
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS="50;53" # compiles for NVIDIA Maxwell GPU architecture
```
## Using CUTLASS within other applications
Applications should list [`/include`](/include) within their include paths. They must be
compiled as C++17 or greater.
**Example:** print the contents of a variable storing half-precision data.
```c++
#include <iostream>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/core_io.h>
int main() {
cutlass::half_t x = 2.25_hf;
std::cout << x << std::endl;
return 0;
}
```
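If you compile such a standalone example directly with `nvcc`, a minimal command line looks
roughly like the following; the repository path, architecture flag, and file names are
placeholders you should adjust for your setup:
```bash
# CUTLASS_DIR is assumed to point at a clone of the CUTLASS repository.
$ nvcc -std=c++17 -I${CUTLASS_DIR}/include -I${CUTLASS_DIR}/tools/util/include \
    -arch=sm_80 example.cu -o example
```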
## Launching a GEMM kernel in CUDA
**Example:** launch a mixed-precision GEMM targeting Turing Tensor Cores.
_Note, this example uses CUTLASS Utilities. Be sure `tools/util/include` is listed as an include path._
```c++
#include <cutlass/numeric_types.h>
#include <cutlass/gemm/device/gemm.h>
#include <cutlass/util/host_tensor.h>
int main() {
// Define the GEMM operation
using Gemm = cutlass::gemm::device::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
float, // ElementAccumulator
cutlass::arch::OpClassTensorOp, // tag indicating Tensor Cores
cutlass::arch::Sm75 // tag indicating target GPU compute architecture
>;
Gemm gemm_op;
cutlass::Status status;
//
// Define the problem size
//
int M = 512;
int N = 256;
int K = 128;
float alpha = 1.25f;
float beta = -1.25f;
//
// Allocate device memory
//
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A({M, K});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B({K, N});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C({M, N});
cutlass::half_t const *ptrA = A.device_data();
cutlass::half_t const *ptrB = B.device_data();
cutlass::half_t const *ptrC = C.device_data();
cutlass::half_t *ptrD = C.device_data();
int lda = A.device_ref().stride(0);
int ldb = B.device_ref().stride(0);
int ldc = C.device_ref().stride(0);
int ldd = C.device_ref().stride(0);
//
// Launch GEMM on the device
//
status = gemm_op({
{M, N, K},
{ptrA, lda}, // TensorRef to A device tensor
{ptrB, ldb}, // TensorRef to B device tensor
{ptrC, ldc}, // TensorRef to C device tensor
{ptrD, ldd}, // TensorRef to D device tensor - may be the same as C
{alpha, beta} // epilogue operation arguments
});
if (status != cutlass::Status::kSuccess) {
return -1;
}
return 0;
}
```
Note, the above could be simplified as follows using helper methods defined in `HostTensor`.
```c++
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A({M, K});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B({K, N});
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C({M, N});
//
// Use the TensorRef returned by HostTensor::device_ref().
//
status = gemm_op({
{M, N, K},
A.device_ref(), // TensorRef to A device tensor
B.device_ref(), // TensorRef to B device tensor
C.device_ref(), // TensorRef to C device tensor
C.device_ref(), // TensorRef to D device tensor - may be the same as C
{alpha, beta} // epilogue operation arguments
});
```
## Launching a GEMM kernel using CUTLASS 3.0 or newer
**Example:** launch a mixed-precision GEMM targeting Hopper Tensor Cores.
```c++
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/packed_stride.hpp"
using namespace cute;
int main(int argc, char const **args) {
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TilesShape = Shape<_128,_128,_64>; // Threadblock-level tile size
using ClusterShape = Shape<_1,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; // Kernel to launch based on the default setting in the Collective Builder
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA, AlignmentA,
ElementB, LayoutB, AlignmentB,
ElementAccumulator,
TilesShape, ClusterShape,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using CollectiveEpilogue = cutlass::epilogue::collective::DefaultEpilogue<
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>>;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int>, // Indicates ProblemShape
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
Gemm gemm_op;
cutlass::Status status;
//
// Define the problem size
//
int M = 512;
int N = 256;
int K = 128;
float alpha = 1.25f;
float beta = -1.25f;
//
// Allocate device memory
//
cutlass::DeviceAllocation<typename Gemm::ElementA> block_A;
cutlass::DeviceAllocation<typename Gemm::ElementB> block_B;
cutlass::DeviceAllocation<typename Gemm::ElementC> block_C;
cutlass::DeviceAllocation<typename Gemm::EpilogueOutputOp::ElementOutput> block_D;
using StrideA = typename Gemm::GemmKernel::StrideA;
using StrideB = typename Gemm::GemmKernel::StrideB;
using StrideC = typename Gemm::GemmKernel::StrideC;
using StrideD = typename Gemm::GemmKernel::StrideD;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, Int<1>{}));
stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, Int<1>{}));
stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, Int<1>{}));
stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, Int<1>{}));
block_A.reset(M * K);
block_B.reset(K * N);
block_C.reset(M * N);
block_D.reset(M * N);
//
// Launch GEMM on the device
//
status = gemm_op({
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K},
block_A.get(),
stride_A,
block_B.get(),
stride_B,
{block_C.get(), stride_C, block_D.get(), stride_D, {alpha, beta}}
});
if (status != cutlass::Status::kSuccess) {
return -1;
}
return 0;
}
```
# CUTLASS Library
The [CUTLASS Library](/tools/library) defines an API for managing and executing collections of compiled
kernel instances and launching them from host code without template instantiations in client code.
The host-side launch API is designed to be analogous to BLAS implementations for convenience, though its
kernel selection procedure is intended only to be functionally sufficient. It may not launch the
optimal tile size for a given problem. It chooses the first available kernel whose data types,
layouts, and alignment constraints satisfy the given problem. Kernel instances and a data structure
describing them are completely available to client applications which may choose to implement their
own selection logic.
[cuBLAS](https://developer.nvidia.com/cublas) offers the best performance and functional coverage
for dense matrix computations on NVIDIA GPUs.
The CUTLASS Library is used by the CUTLASS Profiler to manage kernel instances, and it is also used
by several SDK examples.
* [10_planar_complex](/examples/10_planar_complex/planar_complex.cu)
* [11_planar_complex_array](/examples/11_planar_complex_array/planar_complex_array.cu)
The CUTLASS Library defines enumerated types describing numeric data types, matrix and tensor
layouts, math operation classes, complex transformations, and more.
Client applications should specify [`tools/library/include`](/tools/library/include) in their
include paths and link against libcutlass_lib.so.
The CUTLASS SDK example [10_planar_complex](/examples/10_planar_complex/CMakeLists.txt) specifies
its dependency on the CUTLASS Library with the following CMake command.
```
target_link_libraries(
10_planar_complex
PRIVATE
cutlass_lib
cutlass_tools_util_includes
)
```
A sample kernel launch from host-side C++ is shown as follows.
```c++
#include "cutlass/library/library.h"
#include "cutlass/library/handle.h"
int main() {
//
// Define the problem size
//
int M = 512;
int N = 256;
int K = 128;
float alpha = 1.25f;
float beta = -1.25f;
//
// Allocate device memory
//
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> A({M, K});
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> B({K, N});
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> C({M, N});
float const *ptrA = A.device_data();
float const *ptrB = B.device_data();
float const *ptrC = C.device_data();
float *ptrD = C.device_data();
int lda = A.device_ref().stride(0);
int ldb = B.device_ref().stride(0);
int ldc = C.device_ref().stride(0);
  int ldd = C.device_ref().stride(0);
//
// CUTLASS Library call to execute device GEMM
//
cutlass::library::Handle handle;
//
// Launch GEMM on CUDA device.
//
cutlass::Status status = handle.gemm(
M,
N,
K,
cutlass::library::NumericTypeID::kF32, // data type of internal accumulation
cutlass::library::NumericTypeID::kF32, // data type of alpha/beta scalars
&alpha, // pointer to alpha scalar
cutlass::library::NumericTypeID::kF32, // data type of A matrix
cutlass::library::LayoutTypeID::kColumnMajor, // layout of A matrix
ptrA, // pointer to A matrix in device memory
lda, // leading dimension of A matrix
cutlass::library::NumericTypeID::kF32, // data type of B matrix
cutlass::library::LayoutTypeID::kColumnMajor, // layout of B matrix
ptrB, // pointer to B matrix in device memory
ldb, // leading dimension of B matrix
&beta, // pointer to beta scalar
cutlass::library::NumericTypeID::kF32, // data type of C and D matrix
ptrC, // pointer to C matrix in device memory
    ldc,                                           // leading dimension of C matrix
ptrD, // pointer to D matrix in device memory
ldd // leading dimension of D matrix
);
if (status != cutlass::Status::kSuccess) {
return -1;
}
return 0;
}
```
# Example CMake Commands
To instantiate all operations supporting all tile sizes, data types, and alignment constraints, specify
`-DCUTLASS_LIBRARY_KERNELS=all` when running `cmake`.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='70;75;80' -DCUTLASS_LIBRARY_KERNELS=all
```
The above command line generates about twenty thousand kernels targeting NVIDIA Ampere, Turing, and Volta architectures.
Compiling thousands of kernels for three different architectures is time-consuming. Additionally, this would
result in a large binary size and, on some platforms, cause the linker to fail when building the library.
Enabling the "unity build" instantiates multiple kernel instances in each compilation unit, thereby reducing binary size
and avoiding linker limitations on some platforms.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=all -DCUTLASS_UNITY_BUILD_ENABLED=ON
```
It is advised to only compile CUTLASS kernels for NVIDIA architectures one plans on running. Furthermore, kernels
can be selectively included in the CUTLASS Library by specifying filter strings and wildcard characters when executing CMake.
Several examples are defined below for convenience. They may be combined as a comma-delimited list.
Compiling only the kernels desired reduces compilation time.
## GEMM CMake Examples
**Example.** All GEMM kernels targeting NVIDIA Ampere Tensor Cores.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=80 -DCUTLASS_LIBRARY_KERNELS=tensorop*gemm
```
**Example.** All GEMM kernels targeting NVIDIA Turing Tensor Cores.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=75 -DCUTLASS_LIBRARY_KERNELS=tensorop*gemm
```
**Example.** All GEMM kernels with FP32 accumulation targeting NVIDIA Ampere, Turing, and Volta architectures.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=s*gemm
```
**Example.** All kernels which expect A and B to be column-major or row-major targeting NVIDIA Ampere, Turing, and Volta architectures.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=gemm*nn,gemm*tt
```
**Example.** All planar complex GEMM variants targeting NVIDIA Ampere, Turing, and Volta architectures.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=planar_complex
```
## Convolution CMake Examples
**Example.** All convolution kernels targeting NVIDIA Ampere's 16816 Tensor Core operation
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='80' -DCUTLASS_LIBRARY_KERNELS=s16816fprop,s16816dgrad,s16816wgrad
```
**Example.** All forward propagation (fprop) convolution kernels targeting CUDA Cores for multiple NVIDIA architectures
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='50;60;61;70;75;80' -DCUTLASS_LIBRARY_KERNELS=sfprop
```
**Example.** All forward propagation (fprop) convolution kernels with FP32 accumulation and FP16 input targeting NVIDIA Ampere's 16816 Tensor Core operation
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='80' -DCUTLASS_LIBRARY_KERNELS=s16816fprop_*_f16
```
**Example.** All backward weight gradient (wgrad) convolution kernels with FP32 accumulation, FP16 input, and optimized global memory iterator
targeting NVIDIA Ampere, Turing, and Volta Tensor Core operations
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='70;75;80' -DCUTLASS_LIBRARY_KERNELS=tensorop*s*wgrad_optimized_f16
```
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/quickstart.md/0 | {
"file_path": "media/docs/quickstart.md",
"repo_id": "media",
"token_count": 9739
} | 38 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base class for Epilogue Visitor Emitter
"""
from cutlass_library import DataTypeTag
from cutlass.backend.evt.ir import TopoVisitorNode, DAGIR
class FusionCallbacks:
def __init__(self, dag_ir: DAGIR, cc: int, emit_CD=True) -> None:
"""
Emit the EVT fusion callbacks
:param dag_ir: the DAG IR holding the epilogue visitor
:param cc: compute capability
:param emit_CD: whether to emit nodes C & D as a part of the fusion callbacks
For Sm90, set emit_CD=False, as Tensor C & D are hardcoded in the collective API
so that their shared memory can be explicitly reused
For Sm89, set emit_CD=True as they are treated as normal AuxLoad & AuxStore nodes.
"""
self.dag_ir = dag_ir
self.emit_CD = emit_CD
self.cc = cc
if self.cc < 90:
self.namespace = "threadblock"
else:
self.namespace = "fusion"
#
# Helper functions
#
def get_visitor_name(self, node: str):
"""
Get the visitor name
"""
meta = self.dag_ir.get_node_meta(node)
if not isinstance(meta, TopoVisitorNode) and self.dag_ir.in_degree(node) > 0:
return f"EVT{meta.name_camel}"
else:
return meta.name_camel
def emit(self):
node_metas = self.dag_ir.node_metas_topological_order()
epilogue_str = ""
# Step 1: emit individual node type decl
# emit the EVT & DAG connector
for meta in node_metas:
if not meta.disabled:
epilogue_str += self.emit_node(meta)
if not self.emit_CD and meta.name == "D":
continue
if isinstance(meta, TopoVisitorNode):
epilogue_str += self.emit_dag(meta)
else:
epilogue_str += self.emit_evt(meta)
# Step 2: post-processing & get callback name
if not self.emit_CD:
if not self.dag_ir.has_node("C"):
epilogue_str += "using ElementC = void;\nusing StrideC = StrideD;\n"
output_node = self.dag_ir.get_all_inputs("D")[0]
# The callback is the src of node D
callback_name = self.get_visitor_name(output_node)
else:
# The callback is the last node in the topological order
callback_name = self.get_visitor_name(node_metas[-1].name)
return epilogue_str, callback_name
def emit_evt(self, node):
if self.dag_ir.in_degree(node.name) == 0:
return ""
evt_tmp = f"""
using EVT{node.name_camel} = cutlass::epilogue::{self.namespace}::Sm{self.cc}EVT<
{node.name_camel},
"""
sorted_children = self.dag_ir.get_all_inputs(node.name)
evt_node_strs = [f" {self.get_visitor_name(child_name)}" for child_name in sorted_children]
evt_tmp += ",\n".join(evt_node_strs) + ">;\n"
return evt_tmp
def emit_dag(self, node):
subgraph = node.subgraph
subgraph_nodes = subgraph.nodes_topological_order()
# Emit the Edge Tuple
edge_tuples = "cute::tuple<\n"
for n in subgraph_nodes[:-1]:
in_edges = subgraph.in_edges(n)
edge_weights = [subgraph.get_edge_weight(edge[0], edge[1]) for edge in in_edges]
sorted_children = [edge[0] for _, edge in sorted(zip(edge_weights, in_edges))]
edge_tuple = " cute::seq<"
edge_str = [str(subgraph_nodes.index(child)) for child in sorted_children]
edge_tuple += ", ".join(edge_str) + ">,\n"
edge_tuples += edge_tuple
edge_tuples += " >"
# Emit the node list
dag_nodes = ""
dag_node_strs = []
for n in subgraph_nodes[:-1]:
n_meta = subgraph.get_node_meta(n)
if n_meta.disabled:
dag_node_strs.append(f" {self.get_visitor_name(n)}")
else:
dag_node_strs.append(f" {n_meta.name_camel}")
dag_nodes = ",\n".join(dag_node_strs)
return f"""
using {node.name_camel} = cutlass::epilogue::{self.namespace}::Sm{self.cc}TopologicalVisitor<
{DataTypeTag[node.subgraph.element_compute]},
{edge_tuples},
{dag_nodes}
>;
"""
def emit_node(self, node):
if isinstance(node, TopoVisitorNode):
emission = ""
for node in node.subgraph.node_metas_topological_order():
if not node.disabled:
emission += self.emit_node(node)
return emission
else:
return node.underlying_impl.type_decl
| python/cutlass/backend/evt/backend/emitter_base.py/0 | {
"file_path": "python/cutlass/backend/evt/backend/emitter_base.py",
"repo_id": "python",
"token_count": 2763
} | 39 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Store node and implementations
"""
import ctypes
from cutlass_library import DataType
from cutlass.backend.c_types import tuple_factory
from cutlass.backend.epilogue import dtype2ctype, to_ctype_value
from cutlass.backend.evt.ir.node import NodeBase, ImplBase, NoOpImpl
from cutlass.backend.evt.ir.tensor import Tensor
from cutlass.backend.library import FloatRoundStyle, FunctionalOp
class StoreImplBase(ImplBase):
"""
Base class for store node implementation
"""
reserved_names = ["D"]
def __init__(self, node) -> None:
super().__init__(node)
self.element = node.element
self.element_output = node.element_output
self.stride = node.store_tensor.stride
class StoreDImpl(StoreImplBase):
"""
Store D implementation
"""
@property
def argument_type_d(self):
stride_mnl = self.get_stride_mnl()
tuple_type = tuple_factory(stride_mnl, self.stride_dtype)
class _Argument(ctypes.Structure):
_fields_ = [
("ptr_D", ctypes.c_void_p),
("stride_D", tuple_type)
]
def __init__(self, ptr: int) -> None:
self.ptr_D = ptr
self.stride_D = tuple_type(stride_mnl)
return _Argument
@staticmethod
def match(node, problem_size: tuple):
if node.name == "D" and node.store_tensor.shape == problem_size:
return True
return False
class AuxStoreImpl(StoreImplBase):
def __init__(self, node) -> None:
super().__init__(node)
self.round_style = FloatRoundStyle.ToNearest
@property
def argument_type(self):
stride_mnl = self.get_stride_mnl()
name = self.name
tuple_type = tuple_factory(stride_mnl, self.stride_dtype)
class _Argument(ctypes.Structure):
_fields_ = [
("ptr_aux", ctypes.c_void_p),
("dAux", tuple_type)
]
def __init__(self, kwargs) -> None:
ptr = kwargs[name]
self.ptr_aux = ptr
self.dAux = tuple_type(stride_mnl)
return _Argument
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
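        # Match only plain row-/column-major auxiliary outputs: exactly one of the last two
        # strides is 1 and the other is nonzero. Broadcast/reduction strides such as (1, 0),
        # (0, 1), or (0, 0) are handled by the reduction implementations below.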
        if ((strideMN[0] == 1 and strideMN[1] != 0) or
            (strideMN[0] != 0 and strideMN[1] == 1)):
return True
else:
return False
class ReductionImplBase(StoreImplBase):
def __init__(self, node) -> None:
super().__init__(node)
self.element = node.store_tensor.element
self.element_compute = node.element_compute
self.reg_reduce_fn = self.node.reg_reduce_fn
self.gmem_reduce_fn = self.node.gmem_reduce_fn
self.round_style = node.round_style
self.stride_dtype = "int"
def get_reduce_identity(self):
"""
Return the reduction identity of the current reduce_fn
"""
maxes = {
DataType.f32: (2 ** 31) - 1,
DataType.f16: (2 ** 15),
DataType.s32: (2 ** 31) - 1,
DataType.s8: (2 ** 7) - 1
}
mins = {
DataType.f32: -maxes[DataType.f32],
DataType.f16: -maxes[DataType.f16],
DataType.s32: -maxes[DataType.s32],
DataType.s8: -maxes[DataType.s8]
}
if self.reg_reduce_fn == FunctionalOp.Maximum:
if self.element_compute not in mins:
raise Exception(f"No min entry for data type {self.element_compute}")
return to_ctype_value(mins[self.element_compute], self.element_compute)
elif self.reg_reduce_fn == FunctionalOp.Multiplies:
return to_ctype_value(1., self.element_compute)
elif self.reg_reduce_fn == FunctionalOp.Minimum:
if self.element_compute not in maxes:
raise Exception(f"No max entry for data type {self.element_compute}")
return to_ctype_value(maxes[self.element_compute], self.element_compute)
else:
return to_ctype_value(0., self.element_compute)
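# Illustrative example: with reg_reduce_fn == FunctionalOp.Maximum and
# element_compute == DataType.f32, the identity is -(2**31 - 1), a "minus infinity"
# sentinel that leaves a running maximum unchanged; the default case (e.g.
# FunctionalOp.Plus) uses 0 for the same reason.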
@property
def argument_type(self):
self.get_reduce_identity()
stride_mnl = self.get_stride_mnl()
name = self.name
tuple_type = tuple_factory(stride_mnl, self.stride_dtype)
element_compute = self.element_compute
reduce_identity = self.get_reduce_identity()
class _Argument(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("reduce_identity", dtype2ctype[element_compute]),
("dMNL", tuple_type)
]
def __init__(self, kwargs) -> None:
ptr = kwargs[name]
self.ptr = ptr
self.reduce_identity = reduce_identity
self.dMNL = tuple_type(stride_mnl)
return _Argument
class ColumnReductionImpl(ReductionImplBase):
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if strideMN == (1, 0):
return True
else:
return False
class RowReductionImpl(ReductionImplBase):
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if strideMN == (0, 1):
return True
else:
return False
class ScalarReductionImpl(ReductionImplBase):
@staticmethod
def match(node, problem_size: tuple):
if not node.is_output:
return False
if node.name in StoreImplBase.reserved_names:
return False
strideMN = node.store_tensor.stride[-2:]
if strideMN == (0, 0):
return True
else:
return False
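# Illustrative summary of the stride-based matching above (assuming an (M, N) output whose
# last two stride entries are (stride_M, stride_N)):
#   (1, 0) -> ColumnReductionImpl  (an M-length column vector, reduced across N)
#   (0, 1) -> RowReductionImpl     (an N-length row vector, reduced across M)
#   (0, 0) -> ScalarReductionImpl  (a single scalar, reduced across both M and N)
#   a unit stride in one mode and a nonzero stride in the other -> AuxStoreImpl
#   (a full, non-broadcast auxiliary output tensor)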
class StoreNode(NodeBase):
"""
Store node
"""
possible_impls = [
AuxStoreImpl, RowReductionImpl,
ColumnReductionImpl, ScalarReductionImpl,
NoOpImpl, StoreDImpl
]
def __init__(self, name: str) -> None:
super().__init__(name)
self.op = "store"
self.is_output = False
self._store_tensor = None
@property
def store_tensor(self) -> Tensor:
"""
Return the output tensor (concept: cutlass.backend.evt.ir.tensor)
"""
return self._store_tensor
@store_tensor.setter
def store_tensor(self, kwargs):
"""
Set the store tensor
"""
self._store_tensor = Tensor(**kwargs)
def type_propagation(self, input_node_metas: 'list[NodeBase]'):
"""
The store node has element_output = element_input
"""
if self.is_output:
if self.store_tensor is None:
raise RuntimeError(f"The store tensor of node {self.name} is unknown.")
self.element = self.store_tensor.element
assert len(input_node_metas) == 1, "Store node can only have one input node"
self.element_output = input_node_metas[0].element_output
def broadcast_propagation(self, input_node_metas: 'list[NodeBase]'):
super().broadcast_propagation(input_node_metas)
if self.is_output:
self._store_tensor.broadcast(self.tensor.shape)
| python/cutlass/backend/evt/ir/store_nodes.py/0 | {
"file_path": "python/cutlass/backend/evt/ir/store_nodes.py",
"repo_id": "python",
"token_count": 4087
} | 40 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import copy
import ctypes
import enum
from cuda import cuda, cudart
from cutlass_library import SubstituteTemplate
import numpy as np
from cutlass_library import (
ComplexTransformTag,
DataType,
DataTypeNames,
DataTypeSize,
DataTypeTag,
EpilogueScheduleSuffixes,
EpilogueScheduleTag,
EpilogueScheduleType,
GemmKind,
GemmKindNames,
GemmUniversalMode,
KernelScheduleSuffixes,
KernelScheduleTag,
KernelScheduleType,
LayoutTag,
LayoutType,
MathOperation,
MathOperationTag,
OpcodeClass,
OpcodeClassNames,
OpcodeClassTag,
OperationKind,
ShortComplexLayoutNames,
ShortDataTypeNames,
ShortLayoutTypeNames,
SwizzlingFunctor,
SwizzlingFunctorTag,
TileSchedulerSuffixes,
TileSchedulerTag,
TileSchedulerType,
get_complex_from_real
)
from cutlass.backend.arguments import ArgumentBase
from cutlass.backend.c_types import (
GemmCoord_,
GemmCoordBatched_,
GenericMainloopArguments3x_,
StrideBatched_,
dim3_,
get_gemm_arguments,
get_gemm_arguments_3x,
get_gemm_arguments_streamk,
get_gemm_grouped_arguments,
get_mainloop_arguments_3x,
get_tile_scheduler_arguments_3x,
)
from cutlass.backend.library import (
ApiVersion,
EmissionType,
SchedulerMode,
SchedulerModeTag,
TensorDescription,
TileDescription,
api_version,
)
from cutlass.backend.memory_manager import device_mem_alloc, todevice
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.backend.type_hint import GemmOperation, Tensor
from cutlass.backend.utils.device import device_sm_count
from cutlass.shape import GemmCoord, MatrixCoord
################################################################################
#
# Data structure modeling a GEMM operation
#
################################################################################
def leading_dimension(layout: LayoutType, shape: MatrixCoord) -> int:
"""
Returns the leading dimension of a tensor with layout ``layout`` and shape ``shape``.
:param layout: layout of the tensor
:type layout: cutlass.shape.LayoutType
:param shape: shape of the tensor
:type shape: cutlass.shape.MatrixCoord
:return: leading dimension of the tensor
:rtype: int
"""
if layout == LayoutType.RowMajor:
    return shape.column
elif layout == LayoutType.ColumnMajor:
    return shape.row
else:
    raise ValueError(f"Unsupported layout {layout}")
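# Illustrative example (hypothetical values): for problem_size = GemmCoord(128, 256, 64),
#   leading_dimension(LayoutType.RowMajor, problem_size.mk)    == 64   # columns of the 128x64 A
#   leading_dimension(LayoutType.ColumnMajor, problem_size.mk) == 128  # rows of the 128x64 A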
def transpose_layout(layout: LayoutType) -> LayoutType:
if layout == LayoutType.ColumnMajor:
return LayoutType.RowMajor
elif layout == LayoutType.RowMajor:
return LayoutType.ColumnMajor
else:
raise ValueError(f"Unsupported Layout {layout}")
class GemmArguments2x(ArgumentBase):
"""
Argument wrapper for GEMM in CUTLASS 2. It encodes problem information and
user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
"""
def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
self.operation = operation
self.layout_A = operation.A.layout
self.layout_B = operation.B.layout
self.layout_C = operation.C.layout
self.element_A = operation.A.element
self.element_B = operation.B.element
self.element_C = operation.C.element
if operation.C.layout in [LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32]:
raise Exception("Interleaved layout not currently supported")
if hasattr(self.operation.epilogue_functor, "visitor") and operation.arch != 90:
super().__init__(A, B, None, None, **kwargs)
else:
super().__init__(A, B, C, D, **kwargs)
if operation.switched:
self.problem_size = GemmCoord(problem_size.n, problem_size.m, problem_size.k)
self.ptr_A, self.ptr_B = self.ptr_B, self.ptr_A
else:
self.problem_size = problem_size
# If the number of elements in C = problem_size.n, C is treated as the bias
if hasattr(self, "tensor_c_numel"):
if self.tensor_c_numel == self.problem_size.n and self.problem_size.m != 1:
self.bias = True
self.lda = leading_dimension(self.layout_A, self.problem_size.mk)
self.ldb = leading_dimension(self.layout_B, self.problem_size.kn)
self.ldc = leading_dimension(self.layout_C, self.problem_size.mn)
self.ldd = self.ldc
if self.bias:
self.ldc = 0
if "output_op" in kwargs.keys() and gemm_mode != GemmUniversalMode.GemmSplitKParallel:
self.output_op = kwargs["output_op"]
else:
if self.operation.epilogue_functor.element_epilogue in [DataType.s8, DataType.s32, DataType.u8, DataType.u32]:
dtype = int
else:
dtype = float
self.output_op = self.operation.epilogue_type(dtype(1.0), dtype(0.0))
self.gemm_mode = gemm_mode
if gemm_mode in [GemmUniversalMode.Gemm, GemmUniversalMode.GemmSplitKParallel]:
if "split_k_slices" in kwargs.keys():
self.batch_count = kwargs["split_k_slices"]
else:
self.batch_count = 1
self.split_k_slices = self.batch_count
if gemm_mode in [GemmUniversalMode.Batched, GemmUniversalMode.Array]:
if "batch" in kwargs.keys():
self.batch_count = kwargs["batch"]
else:
self.batch_count = 1
if "batch_strides" in kwargs:
self.batched_stride_A = kwargs["batch_strides"]["A"]
self.batched_stride_B = kwargs["batch_strides"]["B"]
self.batched_stride_C = kwargs["batch_strides"]["C"]
self.batched_stride_D = kwargs["batch_strides"]["D"]
else:
self.batched_stride_A = self.problem_size.m * self.problem_size.k
self.batched_stride_B = self.problem_size.n * self.problem_size.k
self.batched_stride_C = self.problem_size.m * self.problem_size.n
self.batched_stride_D = self.problem_size.m * self.problem_size.n
if self.bias:
self.batched_stride_C = self.problem_size.n
if gemm_mode == GemmUniversalMode.Array:
self.ptr_A_array = []
self.ptr_B_array = []
self.ptr_C_array = []
self.ptr_D_array = []
ptr_A_addr = int(self.ptr_A)
ptr_B_addr = int(self.ptr_B)
ptr_C_addr = int(self.ptr_C)
ptr_D_addr = int(self.ptr_D)
stride_A = self.batched_stride_A * DataTypeSize[self.element_A] // 8
stride_B = self.batched_stride_B * DataTypeSize[self.element_B] // 8
stride_C = self.batched_stride_C * DataTypeSize[self.element_C] // 8
stride_D = self.batched_stride_D * DataTypeSize[self.element_C] // 8
for _ in range(self.batch_count):
self.ptr_A_array.append(ptr_A_addr)
self.ptr_B_array.append(ptr_B_addr)
self.ptr_C_array.append(ptr_C_addr)
self.ptr_D_array.append(ptr_D_addr)
ptr_A_addr += stride_A
ptr_B_addr += stride_B
ptr_C_addr += stride_C
ptr_D_addr += stride_D
self.ptr_A_array_buffer = todevice(self.ptr_A_array, dtype=np.int64)
self.ptr_B_array_buffer = todevice(self.ptr_B_array, dtype=np.int64)
self.ptr_C_array_buffer = todevice(self.ptr_C_array, dtype=np.int64)
self.ptr_D_array_buffer = todevice(self.ptr_D_array, dtype=np.int64)
if isinstance(self.operation, GemmOperationUniversal):
self.initialize()
def get_arguments(self):
problem_size_ = self.problem_size.ctype
grid_tiled_shape_ = GemmCoord(
self.grid_tiled_shape.x,
self.grid_tiled_shape.y,
self.grid_tiled_shape.z ).ctype
if self.gemm_mode == GemmUniversalMode.Array:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode,
problem_size_,
self.batch_count,
0,
# Remaining arguments
self.output_op,
int(self.ptr_A_array_buffer.ptr),
int(self.ptr_B_array_buffer.ptr),
int(self.ptr_C_array_buffer.ptr),
int(self.ptr_D_array_buffer.ptr),
0, 0, 0,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
else:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode, problem_size_, self.batch_count, self.batched_stride_D,
# Remaining arguments
self.output_op,
int(self.ptr_A),
int(self.ptr_B),
int(self.ptr_C),
int(self.ptr_D),
self.batched_stride_A,
self.batched_stride_B,
self.batched_stride_C,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
self.arguments = arguments, grid_tiled_shape_, self.gemm_k_size
def initialize(self):
launch_config = self.operation.rt_module.plan(self)
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
# In GEMM split-K parallel, the D pointer is redirected to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm:
device_workspace = workspace_ptr
self.get_arguments()
arguments, grid_tiled_shape, gemm_k_size = self.arguments
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments), ctypes.c_void_p(int(device_workspace)))
host_workspace = bytearray(res_arg.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
def sync(self, stream_sync=True):
super().sync(stream_sync)
if hasattr(self.output_op, "sync"):
self.output_op.sync()
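# Hypothetical usage sketch (assumes `operation` is an already-compiled
# GemmOperationUniversal and a, b, c, d are device-resident or host tensors):
#   arguments = GemmArguments2x(
#       operation=operation, problem_size=GemmCoord(m, n, k),
#       A=a, B=b, C=c, D=d,
#       output_op=operation.epilogue_type(1.0, 0.0),
#       gemm_mode=GemmUniversalMode.Gemm)
#   operation.run(arguments)
#   arguments.sync()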
class GemmArguments2xStreamK(GemmArguments2x):
"""
Argument wrapper for stream-K GEMMs in CUTLASS 2. It encodes problem information and
user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
if gemm_mode not in [GemmUniversalMode.Gemm, GemmUniversalMode.Batched]:
raise Exception(f"Unsupported GEMM mode {gemm_mode}.")
super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
def get_arguments(self):
batch_stride_A = self.problem_size.m * self.problem_size.k
batch_stride_B = self.problem_size.k * self.problem_size.n
batch_stride_C = self.problem_size.m * self.problem_size.n
batch_stride_D = self.problem_size.m * self.problem_size.n
arguments = self.operation.argument_type(
self.gemm_mode,
GemmCoord_(self.problem_size.m, self.problem_size.n, self.problem_size.k),
self.batch_count,
self.output_op,
int(self.ptr_A),
int(self.ptr_B),
int(self.ptr_C),
int(self.ptr_D),
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D,
self.lda, self.ldb, self.ldc, self.ldd, # strides
self.lda, self.ldb, self.ldc, self.ldd,
-1, # avail_sms
)
return arguments
def initialize(self):
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(
self,
device_sm_count(),
self.operation.rt_module.occupancy
)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
# In GEMM split-K parallel, the D pointer is redirected to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm:
device_workspace = workspace_ptr
arguments = self.get_arguments()
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments),
ctypes.c_void_p(int(device_workspace)),
device_sm_count(),
self.operation.rt_module.occupancy
)
host_workspace = bytearray(res_arg.contents)
grid = self.operation.rt_module.get_grid_shape(
ctypes.byref(arguments),
device_sm_count(),
self.operation.rt_module.occupancy
)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = LaunchConfiguration(
[grid.m, grid.n, grid.k],
[self.operation.rt_module.threads, 1, 1],
self.operation.rt_module.shared_memory_capacity
)
class GemmArguments3x(GemmArguments2x):
"""
Argument wrapper for GEMM in CUTLASS 3. It encodes problem information and
user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: GemmUniversalMode
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(self, operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
if gemm_mode not in [GemmUniversalMode.Gemm, GemmUniversalMode.Batched]:
raise Exception(f"Unsupported GEMM mode {gemm_mode}.")
super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
def get_arguments(self):
mainloop_args = get_mainloop_arguments_3x(
self.operation.tile_description.kernel_schedule,
self.operation.A.element,
self.operation.B.element,
self.operation.A.alignment,
self.operation.B.alignment
)
scheduler_args = get_tile_scheduler_arguments_3x(self.operation.tile_description.tile_scheduler)
uses_default_epilogue = self.operation.rt_module.uses_default_epilogue()
argument_type, epilogue_args, epilogue_type, hw_info = get_gemm_arguments_3x(
mainloop_args, self.operation.epilogue_functor, scheduler_args, uses_default_epilogue)
problem_size_ = GemmCoordBatched_(self.problem_size, self.batch_count)
if self.batch_count > 1:
bsA = self.batched_stride_A
bsB = self.batched_stride_B
bsC = self.batched_stride_C
bsD = self.batched_stride_D
else:
bsA = 0
bsB = 0
bsC = 0
bsD = 0
stride_A = StrideBatched_(self.lda, bsA)
stride_B = StrideBatched_(self.ldb, bsB)
stride_C = StrideBatched_(self.ldc, bsC)
stride_D = StrideBatched_(self.ldd, bsD)
# Superset of potential mainloop arguments
generic_args = GenericMainloopArguments3x_(
int(self.ptr_A),
stride_A,
int(self.ptr_B),
stride_B,
4 # mma_promotion_interval
)
# Set of mainloop arguments needed for this kernel
mainloop = mainloop_args.from_generic_mainloop_args(generic_args)
if not uses_default_epilogue and hasattr(self.output_op, "to_evt_params"):
self.output_op = self.output_op.to_evt_params()
epilogue = epilogue_args(
self.output_op,
int(self.ptr_C),
stride_C,
int(self.ptr_D),
stride_D,
)
# Set hardware info
hw_info_ = hw_info(
0, device_sm_count(),
)
self.arguments = argument_type(
int(self.gemm_mode),
problem_size_,
mainloop,
epilogue,
hw_info_,
scheduler_args
)
return self.arguments
def initialize(self):
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
# In GEMM split-K parallel, the D pointer is redirected to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == GemmUniversalMode.Gemm:
device_workspace = workspace_ptr
self.get_arguments()
res_arg = self.operation.rt_module.get_args(
ctypes.byref(self.arguments),
ctypes.c_void_p(int(device_workspace)),
)
host_workspace = bytearray(res_arg.contents)
grid = self.operation.rt_module.get_grid_shape(
ctypes.byref(self.arguments),
ctypes.c_void_p(int(device_workspace)),
)
block = self.operation.rt_module.get_block_shape()
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = LaunchConfiguration(
[grid.x, grid.y, grid.z],
[block.x, block.y, block.z],
self.operation.rt_module.shared_memory_capacity,
)
def GemmArguments(operation, problem_size, A, B, C, D, gemm_mode=GemmUniversalMode.Gemm, **kwargs):
"""
Argument wrapper for GEMM in CUTLASS 2 or 3. It returns either 2.x or 3.x arguments
depending on the API version and swizzling functor of `operation`.
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
:type problem_size: :class:`cutlass.shape.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_library.GemmUniversalMode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
if operation.swizzling_functor == SwizzlingFunctor.StreamK:
if operation.api == ApiVersion.v3x:
raise Exception("Stream K is currently only supported in CUTLASS 2.x")
ArgClass = GemmArguments2xStreamK
else:
ArgClass = GemmArguments3x if operation.api == ApiVersion.v3x else GemmArguments2x
return ArgClass(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
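# Illustrative dispatch: a SwizzlingFunctor.StreamK operation maps to GemmArguments2xStreamK,
# an ApiVersion.v3x operation maps to GemmArguments3x, and any other 2.x operation maps to
# GemmArguments2x. Hypothetical call (a, b, c, d are user tensors):
#   args = GemmArguments(operation, GemmCoord(m, n, k), a, b, c, d)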
class GemmGroupedArguments:
"""
Argument wrapper for GEMM Grouped. It encodes problem information and
user-provided tensors into the kernel's arguments
:param operation: the GEMM Grouped operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationGrouped`
:param problem_sizes: list of GEMM problem sizes gemm(M, N, K)
:type problem_sizes: list[:class:`cutlass.shape.GemmCoord`]
:param A: list of tensor A
:type A: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param B: list of tensor B
:type B: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param C: list of tensor C
:type C: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param D: list of tensor D
:type D: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
"""
def __init__(self, operation, problem_sizes, A, B, C, D, **kwargs):
# Get number of problems in the group
self.problem_count = len(problem_sizes)
# Check the input arguments
assert len(A) == self.problem_count
assert len(B) == self.problem_count
assert len(C) == self.problem_count
assert len(D) == self.problem_count
problem_size_host = []
self.ptr_A_host = []
self.ptr_B_host = []
self.ptr_C_host = []
self.ptr_D_host = []
lda_host = []
ldb_host = []
ldc_host = []
ldd_host = []
self.partitions = 1
self.operation = operation
# Get the threadblock
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = GemmCoord(
threadblock_shape[0],
threadblock_shape[1],
threadblock_shape[2],
)
self.threadblock_swizzle = operation.swizzling_functor
self.total_tiles = 0
self.gemm_arguments = []
self.stream = kwargs.get("stream", cuda.CUstream(0))
# Process the input arguments
for idx, problem_size in enumerate(problem_sizes):
M, N, K = problem_size.m, problem_size.n, problem_size.k
temp_argument = GemmArguments2x(
operation=operation,
problem_size=GemmCoord(M, N, K),
A=A[idx], B=B[idx], C=C[idx], D=D[idx])
self.gemm_arguments.append(temp_argument)
problem_size_host.append(
[temp_argument.problem_size.m,
temp_argument.problem_size.n,
temp_argument.problem_size.k]
)
self.ptr_A_host.append(int(temp_argument.ptr_A))
lda_host.append(temp_argument.lda)
self.ptr_B_host.append(int(temp_argument.ptr_B))
ldb_host.append(temp_argument.ldb)
self.ptr_C_host.append(int(temp_argument.ptr_C))
ldc_host.append(temp_argument.ldc)
self.ptr_D_host.append(int(temp_argument.ptr_D))
ldd_host.append(temp_argument.ldd)
# Get number of tiles
grid = self.operation.rt_module.get_grid_shape(
self.operation.rt_module.get_tiled_shape(
temp_argument.problem_size.ctype,
self.threadblock_shape.ctype,
temp_argument.batch_count
)
)
self.total_tiles += grid.x * grid.y * grid.z
self.problem_size_buffer = todevice(problem_size_host, np.int32)
self.ptr_A_buffer = todevice(self.ptr_A_host, np.int64)
self.ptr_B_buffer = todevice(self.ptr_B_host, np.int64)
self.ptr_C_buffer = todevice(self.ptr_C_host, np.int64)
self.ptr_D_buffer = todevice(self.ptr_D_host, np.int64)
self.lda_buffer = todevice(lda_host, np.int64)
self.ldb_buffer = todevice(ldb_host, np.int64)
self.ldc_buffer = todevice(ldc_host, np.int64)
self.ldd_buffer = todevice(ldd_host, np.int64)
if "output_op" in kwargs.keys():
self.alpha = kwargs["output_op"].alpha
self.beta = kwargs["output_op"].beta
else:
self.alpha = 1.0
self.beta = 0.0
if "output_op" in kwargs.keys():
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# Get host problem size. Keep a reference to the host array so that the raw pointer
# below remains valid for the lifetime of these arguments.
self.problem_sizes_host = np.array(problem_size_host, dtype=np.int32)
self.host_problem_size_ptr = self.problem_sizes_host.__array_interface__["data"][0]
self.arguments = self.get_arguments()
self.initialize()
def get_arguments(self):
return self.operation.argument_type(
self.problem_size_buffer.ptr,
self.problem_count,
self.total_tiles,
self.output_op,
self.ptr_A_buffer.ptr,
self.ptr_B_buffer.ptr,
self.ptr_C_buffer.ptr,
self.ptr_D_buffer.ptr,
self.lda_buffer.ptr,
self.ldb_buffer.ptr,
self.ldc_buffer.ptr,
self.ldd_buffer.ptr,
ctypes.c_void_p(int(self.host_problem_size_ptr)),
)
def initialize(self):
# Get launch configuration
launch_config = self.operation.rt_module.plan(self)
# Get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
if self.operation.precompute_mode == SchedulerMode.Host:
device_workspace_ptr = self.operation.rt_module.host_precompute(
self, self.operation.rt_module.get_workspace_size(self),)
else:
device_workspace_ptr = 0
result = self.operation.rt_module.get_args(
ctypes.byref(self.arguments),
self.total_tiles,
ctypes.c_void_p(int(device_workspace_ptr)),
)
host_workspace = bytearray(result.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
def sync(self):
err, = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
for arg in self.gemm_arguments:
arg.sync(stream_sync=False)
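# Hypothetical usage sketch (assumes `grouped_op` is an already-compiled
# GemmOperationGrouped that exposes `epilogue_type`, with per-problem device tensors):
#   args = GemmGroupedArguments(
#       operation=grouped_op,
#       problem_sizes=[GemmCoord(256, 128, 64), GemmCoord(128, 128, 32)],
#       A=[a0, a1], B=[b0, b1], C=[c0, c1], D=[d0, d1],
#       output_op=grouped_op.epilogue_type(1.0, 0.0))
#   grouped_op.run(args)
#   args.sync()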
################################################################################
# Base class for GEMM runtime module
################################################################################
class GemmRTbase(ExecutableOperation):
"""
Base class for the GEMM runtime modules; manages the CUTLASS runtime components
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix}::invoke(params, *shared_storage);
}
"""
def __init__(self, operation: "GemmOperation"):
super().__init__(operation)
self.operation = operation
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = GemmCoord(
threadblock_shape[0], threadblock_shape[1], threadblock_shape[2])
self.threadblock_swizzle = operation.swizzling_functor
# Threads per threadblock
self.threads = operation.tile_description.num_threads
def emit(self):
return self.emitter.emit(self.operation)
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
return 0
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(
f"CUDA error on call to cuFuncSetAttribute: {cuda.cuGetErrorString(err)[1]}"
)
################################################################################
# Runtime module for GEMM Universal
################################################################################
class GemmRTUniversal(GemmRTbase):
"""
GemmRTUniversal manages the CUTLASS runtime components
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int* workspace){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument,
-1, // SM count. Only used for stream-K
-1 // Occupancy. Only used for stream-K
);
// Semaphore holds the pointer to the workspace in the Params struct
params->semaphore = workspace;
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
cutlass::gemm::GemmCoord ${operation_name}_get_tiled_shape(
cutlass::gemm::GemmCoord problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices) {
return ${operation_name}_base::ThreadblockSwizzle::get_tiled_shape(
problem_size, tile_size, split_k_slices);
}
dim3 ${operation_name}_get_grid_shape(cutlass::gemm::GemmCoord tiled_shape) {
return ${operation_name}_base::ThreadblockSwizzle::get_grid_shape(tiled_shape);
}
}
"""
def __init__(self, operation):
super(GemmRTUniversal, self).__init__(operation)
self.extra_funcs = {
"get_tiled_shape": GemmCoord_,
"get_grid_shape": dim3_,
}
self.emitter = EmitGemmUniversalInstance(
"_type", operation.direct_store)
self.argument_type, self.epilogue_type = get_gemm_arguments(operation.epilogue_functor)
self.argtype = [
ctypes.POINTER(self.argument_type),
ctypes.POINTER(GemmCoord_), ctypes.c_int, ctypes.c_void_p
]
def plan(self, arguments):
grid = self.get_tiled_shape(
arguments.problem_size.ctype,
self.threadblock_shape.ctype,
arguments.batch_count
)
gemm_k_size = arguments.problem_size.k
if arguments.gemm_mode in [GemmUniversalMode.Gemm, GemmUniversalMode.GemmSplitKParallel]:
alignk = max(max(128 // DataTypeSize[self.operation.A.element],
128 // DataTypeSize[self.operation.B.element]), 1)
gemm_k_size = (((arguments.problem_size.k + arguments.batch_count - 1) //
arguments.batch_count + alignk - 1) // alignk) * alignk
if gemm_k_size:
grid_z = (arguments.problem_size.k + gemm_k_size - 1) // gemm_k_size
grid = GemmCoord(grid.m, grid.n, grid_z).ctype
arguments.grid_tiled_shape = dim3_(grid.m, grid.n, grid.k)
grid = self.get_grid_shape(grid)
arguments.gemm_k_size = gemm_k_size
return LaunchConfiguration(
[grid.x, grid.y, grid.z],
[self.threads, 1, 1],
self.shared_memory_capacity)
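# Worked example (hypothetical values): for K = 1024 with split_k_slices = batch_count = 3
# and f16 operands, alignk = max(128 / 16, 128 / 16, 1) = 8, so
# gemm_k_size = ceil(ceil(1024 / 3) / 8) * 8 = ceil(342 / 8) * 8 = 344 and the launch gets
# grid.z = ceil(1024 / 344) = 3 threadblock slices along K.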
def get_device_workspace_size(self, arguments: GemmArguments):
workspace_bytes = 0
if arguments.gemm_mode == GemmUniversalMode.GemmSplitKParallel:
workspace_bytes = (DataTypeSize[arguments.operation.C.element]
* arguments.batched_stride_D * arguments.grid_tiled_shape.z // 8)
elif (arguments.gemm_mode == GemmUniversalMode.Gemm and
arguments.split_k_slices > 1):
workspace_bytes = 4 * arguments.grid_tiled_shape.x * arguments.grid_tiled_shape.y
return workspace_bytes
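# Worked example (hypothetical values): a GemmSplitKParallel launch with an f32 C/D,
# M = N = 1024 and grid_tiled_shape.z = 4 slices needs 32 * (1024 * 1024) * 4 / 8 =
# 16777216 bytes (16 MiB) of partial-accumulator workspace, while serial split-K
# (Gemm mode with split_k_slices > 1) only needs a 4-byte semaphore per output tile.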
class GemmRTUniversalStreamK(GemmRTUniversal):
"""
Manages the CUTLASS runtime components for 2.x stream K kernels
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
using GemmType = ${operation_name}_base;
// Get the params as byte array
char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace,
int sm_count, int occupancy) {
GemmType::Params* params;
params = new GemmType::Params(*argument, sm_count, occupancy);
params->init_workspace(workspace);
char *bytes = ((char*)(params));
char *output = new char[sizeof(GemmType::Params)];
for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++)
output[i] = bytes[i];
return output;
}
dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int device_sms, int sm_occupancy) {
typename GemmType::Params params(*args, device_sms, sm_occupancy);
return params.get_grid_dims();
}
uint64_t ${operation_name}_get_kernel_workspace_size(GemmType::Arguments* args, int device_sms, int sm_occupancy) {
typename GemmType::Params params(*args, device_sms, sm_occupancy);
return params.get_workspace_size();
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTUniversalStreamK, self).__init__(operation)
self.extra_funcs = {
"get_grid_shape": GemmCoord_,
"get_kernel_workspace_size": ctypes.c_uint64,
}
self._occupancy = None
self.argument_type, self.epilogue_type = get_gemm_arguments_streamk(operation.epilogue_functor)
@property
def occupancy(self):
if self._occupancy is None:
err, self._occupancy = cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
self.kernel, self.threads, self.shared_memory_capacity,
cuda.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(
"CUDA error on call to cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags: "
f"{cuda.cuGetErrorString(err)[1]}")
return self._occupancy
def get_device_workspace_size(self, arguments: GemmArguments2xStreamK, device_sms: int, sm_occupancy: int):
return self.get_kernel_workspace_size(ctypes.byref(arguments.get_arguments()), device_sms, sm_occupancy)
################################################################################
# Runtime module for GEMM Universal within CUTLASS 3
################################################################################
class GemmRTUniversal3x(GemmRTUniversal):
"""
Manages the CUTLASS runtime components for 3.x kernels
"""
KernelTemplate = r"""
using Operator = ${operation_name}${operation_suffix};
extern "C"
__global__ __launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor)
void ${operation_name}(__grid_constant__ typename Operator::Params const params) {
// Dynamic shared memory base pointer
extern __shared__ char smem[];
// Declare pointer to dynamic shared memory.
Operator op;
op(params, smem);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return ${operation_name}${operation_suffix}::SharedStorageSize;
}
using GemmType = ${operation_name}_base;
bool ${operation_name}_uses_default_epilogue() {
return std::is_same_v<GemmType::CollectiveEpilogue::DispatchPolicy, cutlass::gemm::EpilogueDefault>;
}
// Get the workspace size
uint64_t ${operation_name}_get_kernel_workspace_size(GemmType::Arguments* argument) {
return GemmType::get_workspace_size(*argument);
}
// Get the params as byte array
char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace){
GemmType::Params params = GemmType::to_underlying_arguments(*argument, workspace);
char *bytes = ((char*)(¶ms));
char *output = new char[sizeof(GemmType::Params)];
for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++)
output[i] = bytes[i];
return output;
}
// Get the total number of blocks for a persistent kernel
uint64_t ${operation_name}_get_persistent_tiled_blk_shape_mnl(GemmType::ProblemShape problem) {
auto problem_shape_MNKL = append<4>(problem, Int<1>{});
auto [problem_blocks_m, problem_blocks_n, problem_blocks_l] =
cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::get_tiled_cta_shape_mnl(
problem_shape_MNKL, GemmType::TileShape{}, GemmType::DispatchPolicy::ClusterShape{});
return problem_blocks_m * problem_blocks_n * problem_blocks_l;
}
// Get the grid shape
dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int* workspace) {
auto tmp_params = GemmType::to_underlying_arguments(*args, workspace);
return GemmType::get_grid_shape(tmp_params);
}
// Get the block shape
dim3 ${operation_name}_get_block_shape() {
return GemmType::get_block_shape();
}
}
"""
def __init__(self, operation):
super(GemmRTUniversal3x, self).__init__(operation)
self.extra_funcs = {
"get_grid_shape": dim3_,
"get_block_shape": dim3_,
"get_persistent_tiled_blk_shape_mnl": ctypes.c_uint64,
"get_kernel_workspace_size": ctypes.c_uint64,
"uses_default_epilogue": ctypes.c_bool,
}
self.emitter = EmitGemmUniversalInstance3x("_type")
def get_device_workspace_size(self, arguments: GemmArguments3x):
return self.get_kernel_workspace_size(ctypes.byref(arguments.get_arguments()))
class EmitGemmUniversalInstance3x:
"""Responsible for emitting a CUTLASS 3 template definition"""
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cute/tensor.hpp",
"cute/atom/mma_atom.hpp",
"cutlass/numeric_types.h",
"cutlass/gemm/collective/collective_builder.hpp",
"cutlass/gemm/kernel/sm90_tile_scheduler.hpp",
"cutlass/gemm/kernel/gemm_universal.hpp",
"cutlass/epilogue/collective/collective_builder.hpp",
"cutlass/epilogue/collective/default_epilogue.hpp",
"cutlass/epilogue/thread/linear_combination.h"
]
self.gemm_template_kernel = """
using namespace cute;
using CollectiveEpilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch}, ${opcode_class},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
cutlass::epilogue::collective::EpilogueTileAuto,
${element_accumulator}, ${element_epilogue},
${element_c}, ${layout_c}, ${align_c},
${element_d}, ${layout_d}, ${align_d},
${epilogue_schedule}
>::CollectiveOp;
using CollectiveMainloop =
typename cutlass::gemm::collective::CollectiveBuilder<
${arch}, ${opcode_class},
${element_a}, ${layout_a}, ${align_a},
${element_b}, ${layout_b}, ${align_b},
${element_accumulator},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
${stage_count_type},
${kernel_schedule}
>::CollectiveOp;
// Gemm operator ${operation_name}
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
${tile_scheduler}
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_kernel_visitor = """
using namespace cute;
${callback_decl}
using CollectiveEpilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch}, ${opcode_class},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
cutlass::epilogue::collective::EpilogueTileAuto,
${element_accumulator}, ${element_epilogue},
ElementC, StrideC, ${align_c},
ElementD, StrideD, ${align_d},
${epilogue_schedule},
${callback_name}
>::CollectiveOp;
using CollectiveMainloop =
typename cutlass::gemm::collective::CollectiveBuilder<
${arch}, ${opcode_class},
${element_a}, ${layout_a}, ${align_a},
${element_b}, ${layout_b}, ${align_b},
${element_accumulator},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
${stage_count_type},
${kernel_schedule}
>::CollectiveOp;
// Gemm operator ${operation_name}
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
${tile_scheduler}
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = self.gemm_template_kernel + """
// Define device-level operator
using DeviceKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}${operation_suffix}>;
"""
def emit(self, operation):
# Support built-in epilogue functors or user-defined functions
if operation.tile_description.stages is None or operation.tile_description.stages == 0:
stage_count_type = "cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>"
else:
stage_count_type = "_" + str(operation.tile_description.stages)
if operation.emission_type == EmissionType.Kernel:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
kschedule = KernelScheduleType.ScheduleAuto
eschedule = EpilogueScheduleType.ScheduleAuto
tschedule = TileSchedulerType.Default
if operation.tile_description.kernel_schedule is not None:
kschedule = operation.tile_description.kernel_schedule
if operation.tile_description.epilogue_schedule is not None:
eschedule = operation.tile_description.epilogue_schedule
if operation.tile_description.tile_scheduler is not None:
tschedule = operation.tile_description.tile_scheduler
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[operation.C.layout],
"element_d": DataTypeTag[operation.epilogue_functor.element_output],
"layout_d": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"element_epilogue": DataTypeTag[operation.epilogue_functor.element_epilogue],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"cluster_m": str(operation.tile_description.cluster_shape[0]),
"cluster_n": str(operation.tile_description.cluster_shape[1]),
"cluster_k": str(operation.tile_description.cluster_shape[2]),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"align_c": str(operation.C.alignment),
"align_d": str(operation.C.alignment),
"stage_count_type": stage_count_type,
"kernel_schedule": KernelScheduleTag[kschedule],
"epilogue_schedule": EpilogueScheduleTag[eschedule],
"tile_scheduler": TileSchedulerTag[tschedule]
}
if hasattr(operation.epilogue_functor, "visitor"):
callback_name, callback_decl = operation.epilogue_functor.emit(operation)
values["callback_name"] = callback_name
values["callback_decl"] = callback_decl
return SubstituteTemplate(self.gemm_template_kernel_visitor, values)
else:
values["epilogue_functor"] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
###################################################################################################
# Runtime module for GEMM Grouped
###################################################################################################
class GemmRTGrouped(GemmRTbase):
"""
GemmRTGrouped manages the CUTLASS runtime components
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// precompute scheduling information
char * ${operation_name}_precompute(${operation_name}_base::Arguments const &args, int tile_count, size_t workspace_bytes) {
char* host_workspace = new char[workspace_bytes];
${operation_name}_base::ProblemVisitor::host_precompute(
args.host_problem_sizes,
args.problem_count,
args.threadblock_count,
(void*)host_workspace
);
return host_workspace;
}
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int tile_count, void* workspace=nullptr){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument, workspace, tile_count);
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
cutlass::gemm::GemmCoord ${operation_name}_get_tiled_shape(
cutlass::gemm::GemmCoord problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices) {
return ${operation_name}_base::ThreadblockSwizzle::get_tiled_shape(
problem_size, tile_size, split_k_slices);
}
dim3 ${operation_name}_get_grid_shape(cutlass::gemm::GemmCoord tiled_shape) {
return ${operation_name}_base::ThreadblockSwizzle::get_grid_shape(tiled_shape);
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTGrouped, self).__init__(operation)
self.extra_funcs = {
"precompute": None,
"get_tiled_shape": GemmCoord_,
"get_grid_shape": dim3_,
}
self.emitter = EmitGemmGroupedInstance("_type")
self.argument_type, self.epilogue_type = get_gemm_grouped_arguments(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_int, ctypes.c_void_p]
def host_precompute(self, arguments, workspace_bytes):
self.precompute.argtype = [
self.argtype[0], ctypes.c_int, ctypes.c_longlong]
self.precompute.restype = ctypes.POINTER(ctypes.c_byte * workspace_bytes)
problem_info = self.precompute(
ctypes.byref(arguments.arguments),
arguments.total_tiles,
workspace_bytes)
problem_info_array = bytearray(problem_info.contents)
# copy to device memory
return todevice(problem_info_array).ptr
def plan(self, arguments):
return LaunchConfiguration(
[arguments.total_tiles, 1, 1],
[self.threads, 1, 1],
self.shared_memory_capacity,
)
def get_workspace_size(self, arguments):
if self.operation.precompute_mode == SchedulerMode.Device:
return 0
elif self.operation.precompute_mode == SchedulerMode.Host:
total_tiles = arguments.total_tiles
entries_per_block = 1
return 8 * entries_per_block * total_tiles  # 8 bytes of precomputed scheduling information per tile
################################################################################
# Runtime module for GEMM and grouped GEMM
################################################################################
class GemmOperationBase:
"""
CUTLASS GEMM operation
"""
def __init__(
self, gemm_kind, arch, tile_description: TileDescription,
A: TensorDescription, B: TensorDescription, C: TensorDescription,
epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1,
api=ApiVersion.v2x, emission_type=EmissionType.Kernel, **kwargs):
self.operation_kind: OperationKind = OperationKind.Gemm
self.arch: int = arch
self.tile_description: TileDescription = tile_description
self.gemm_kind: GemmKind = gemm_kind
self.api = api
self.prefix = "3x" if self.api == ApiVersion.v3x else ""
self.emission_type = emission_type
# Optionally swap the TensorDescriptions for operands A and B and transpose their
# layouts. This is needed to mimic the transpose performed by device::GemmUniversal.
# The code below uses deep copy to avoid overwriting the original TensorDescription
self.switched = (self.api != ApiVersion.v3x and
self.emission_type == EmissionType.Kernel and
C.layout == LayoutType.ColumnMajor)
self.A, self.B, self.C = GemmOperationBase.get_operands(A, B, C, self.switched)
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
if "direct_store" in kwargs:
self.direct_store = kwargs["direct_store"]
else:
self.direct_store = False
@staticmethod
def get_operands(A: TensorDescription, B: TensorDescription, C: TensorDescription, swap: bool):
"""
Makes copies of A, B, and C, and possibly transposes their order. If ``swap`` is set,
A and B are swapped, and the layouts of A, B, and C are transposed.
:param A: description of operand A
:type A: TensorDescription
:param B: description of operand B
:type B: TensorDescription
:param C: description of operand C
:type C: TensorDescription
:return: descriptions of operands A, B, and C
:rtype: tuple[TensorDescription]
"""
if swap:
A_out = copy.deepcopy(B)
B_out = copy.deepcopy(A)
C_out = copy.deepcopy(C)
A_out.layout = transpose_layout(A_out.layout)
B_out.layout = transpose_layout(B_out.layout)
C_out.layout = transpose_layout(C_out.layout)
else:
A_out = copy.deepcopy(A)
B_out = copy.deepcopy(B)
C_out = copy.deepcopy(C)
return A_out, B_out, C_out
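# Illustrative example: with A and B RowMajor and C ColumnMajor, swap=True yields
# A' = B (ColumnMajor), B' = A (ColumnMajor) and a RowMajor output, so the emitted kernel
# computes D^T = B^T * A^T, which is equivalent to D = A * B with a column-major D.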
def run(self, arguments: GemmArguments) -> cuda.CUresult:
"""
Configure and launch the cuda kernel with input arguments
"""
if self.emission_type == EmissionType.Device:
raise Exception('Running a kernel via PyCUTLASS is only enabled with emission type "Kernel"')
err = self.rt_module.run(
arguments.host_workspace,
arguments.device_workspace,
arguments.launch_config,
arguments.stream
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return err
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32,
]
return self.tile_description.math_instruction.math_operation in complex_operators
def is_planar_complex(self):
return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray)
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
inst_shape = ""
inst_operation = ""
intermediate_type = ""
math_operations_map = {
MathOperation.xor_popc: "xor",
}
if (self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp):
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ""
if self.tile_description.math_instruction.instruction_shape is not None:
if self.api == ApiVersion.v3x and self.arch >= 90:
inst_shape = "%dx%dx%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
else:
inst_shape = "%d%d%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
else:
inst_shape = "Default"
inst_shape += math_op_string
if (self.tile_description.math_instruction.element_a != self.A.element and
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind])
def extended_name(self):
"""Append data types if they differ from compute type."""
if self.is_complex():
extended_name = "${core_name}"
else:
if (self.C.element != self.tile_description.math_instruction.element_accumulator and
self.A.element != self.tile_description.math_instruction.element_accumulator):
extended_name = "${element_c}_${core_name}_${element_a}"
elif (self.C.element == self.tile_description.math_instruction.element_accumulator and
self.A.element != self.tile_description.math_instruction.element_accumulator):
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
})
return extended_name
def extended_name_3x(self):
"""Generates a string representing the MMA atom. Assumes accumulator type is C type."""
extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format(
element_a=DataTypeNames[self.A.element],
element_b=DataTypeNames[self.B.element],
element_acc=DataTypeNames[self.accumulator_type()],
element_c=DataTypeNames[self.C.element],
element_d=DataTypeNames[self.epilogue_functor.element_output],
core_name=self.core_name())
return extended_name
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
# Generates a short string representing the ABC layout tags (e.g. ntn or tnn)
def layout_name_3x(self):
if self.is_complex() or self.is_planar_complex():
return "{}{}{}".format(
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)],
ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)])
else:
return "{}{}{}".format(
ShortLayoutTypeNames[self.A.layout],
ShortLayoutTypeNames[self.B.layout],
ShortLayoutTypeNames[self.C.layout])
# Generates a short string representing underlying kernel schedule type
def kernel_schedule_name_3x(self):
if self.tile_description.kernel_schedule is None:
return KernelScheduleSuffixes[KernelScheduleType.ScheduleAuto]
else:
return KernelScheduleSuffixes[self.tile_description.kernel_schedule]
# Generates a short string representing underlying epilogue schedule type
def epilogue_schedule_name_3x(self):
if self.tile_description.epilogue_schedule is None:
return EpilogueScheduleSuffixes[EpilogueScheduleType.ScheduleAuto]
else:
return EpilogueScheduleSuffixes[self.tile_description.epilogue_schedule]
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
if self.api == ApiVersion.v3x and self.arch >= 90:
kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{k}{e}"
return kernel_name_template.format(
p=self.prefix,
ar=self.arch,
op=opcode_class_name,
ex=self.extended_name_3x(),
tbm=self.tile_description.threadblock_shape[0],
tbn=self.tile_description.threadblock_shape[1],
tbk=self.tile_description.threadblock_shape[2],
cm=self.tile_description.cluster_shape[0],
cn=self.tile_description.cluster_shape[1],
ck=self.tile_description.cluster_shape[2],
l=self.tile_description.stages,
s=self.layout_name_3x(),
al=str(self.A.alignment),
k=self.kernel_schedule_name_3x(),
e=self.epilogue_schedule_name_3x()
)
else:
threadblock = self.tile_description.procedural_name_2x()
return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format(
p=self.prefix,
op=opcode_class_name,
ex=self.extended_name(),
tb=threadblock,
l=self.layout_name(),
a=str(self.A.alignment)
)
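# Hedged examples of the two naming schemes above (tokens are illustrative; the exact values come
# from the format strings in procedural_name()):
#   3.x / SM90+ : cutlass<prefix>_sm90_tensorop_<mma-atom>_<tb MxNxK>_<cluster MxNxK>_<stages>_<layouts>_align<N><sched><epi>
#   2.x         : cutlass<prefix>_<opclass>_<extended name>_<threadblock>_<layouts>_align<N>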
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
return self.procedural_name()
class GemmOperationUniversal(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, **kwargs):
api = api_version(arch, tile_description.math_instruction.opcode_class, A.element)
super(GemmOperationUniversal, self).__init__(GemmKind.Universal, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor,
api=api, **kwargs, )
if api == ApiVersion.v3x:
if swizzling_functor == SwizzlingFunctor.StreamK:
raise Exception("Stream K swizzle functor is currently only supported for CUTLASS 2.x kernels")
self.rt_module = GemmRTUniversal3x(self)
else:
if swizzling_functor == SwizzlingFunctor.StreamK:
self.rt_module = GemmRTUniversalStreamK(self)
else:
self.rt_module = GemmRTUniversal(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def device_op(self):
"""
Returns a new GemmOperationUniversal object that is constructed with emission type
``EmissionType.Device``. Since the device-emitted kernel does not require swapping,
any swapping performed by the kernel-emitted operation is reversed.
:return: operation ready for device-level code emission
:rtype: GemmOperationUniversal
"""
A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched)
return GemmOperationUniversal(self.arch, self.tile_description, A, B, C,
self.epilogue_functor, self.swizzling_functor,
emission_type=EmissionType.Device, direct_store=self.direct_store)
class GemmOperationGrouped(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, **kwargs):
super(GemmOperationGrouped, self).__init__(GemmKind.Grouped, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor, **kwargs)
assert "precompute_mode" in kwargs.keys(), "missing keyword arguement 'precompute_mode'."
self.precompute_mode = kwargs["precompute_mode"]
self.rt_module = GemmRTGrouped(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def device_op(self):
"""
Returns a new GemmOperationGrouped object that is constructed with emission type
``EmissionType.Device``. Since the device-emitted kernel does not require swapping,
any swapping performed by the kernel-emitted operation is reversed.
:return: operation ready for device-level code emission
:rtype: GemmOperationGrouped
"""
A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched)
return GemmOperationGrouped(
self.arch, self.tile_description, A, B, C, self.epilogue_functor,
self.swizzling_functor, emission_type=EmissionType.Device,
direct_store=self.direct_store, precompute_mode=self.precompute_mode, )
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitGemmUniversalInstance:
"""Responsible for emitting a CUTLASS template definition"""
def __init__(
self,
operation_suffix="",
direct_store=False
):
self.operation_suffix = operation_suffix
self.direct_store = direct_store
self.includes = [
"cutlass/cutlass.h",
"cutlass/gemm_coord.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
]
if self.direct_store:
self.includes.append(
"cutlass/epilogue/threadblock/default_epilogue_direct_store.h"
)
self.gemm_template_kernel = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = """
// Gemm operator ${operation_name}
using DeviceKernel =
typename cutlass::gemm::device::GemmUniversal<
// Data type and layout of operand A
${element_a}, ${layout_a},
// Data type and layout of operand B
${element_b}, ${layout_b},
// Data type and layout of operand C
${element_c}, ${layout_c},
// Data type of accumulator
${element_accumulator},
// Class of operation
${opcode_class},
// Compute capability of the target kernel
${arch},
// Threadblock tile shape
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
// Warp tile shape
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
// Instruction shape
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
// Epilogue functor
${epilogue_functor},
// Swizzling function
${swizzling_functor},
// Number of pipeline stages
${stages},
// Alignment of operands A and B
${align_a}, ${align_b},
// Type of math operation
${math_operation},
// Complex transform types of operands A and B
${transform_a}, ${transform_b}
>;
"""
self.gemm_template_direct_store = """
// Gemm operator ${operation_name}
using ${operation_name}_default =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
using ${operation_name}_base =
cutlass::gemm::kernel::GemmUniversal<
${operation_name}_default::Mma,
cutlass::epilogue::threadblock::DefaultEpilogueDirectStore<
${operation_name}_default::Epilogue
>::Epilogue,
${operation_name}_default::ThreadblockSwizzle
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_kernel_visitor = """
using OutputTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout<
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
${element_c},
${align_c},
${epilogue_stages} /* epilogue stages */
>;
${callback_decl}
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmWithVisitor<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c}, ${align_c},
${element_accumulator},
${element_epilogue},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${callback_name},
${swizzling_functor},
${stages},
${math_operation},
${epilogue_stages} /* epilogue stages */
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
if operation.emission_type == EmissionType.Kernel:
if self.direct_store:
gemm_template = self.gemm_template_direct_store
else:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"transform_a": ComplexTransformTag[operation.A.complex_transform],
"transform_b": ComplexTransformTag[operation.B.complex_transform],
"math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation],
}
if hasattr(operation.epilogue_functor, "visitor"):
self.includes += [
"cutlass/epilogue/threadblock/fusion/visitors.hpp",
"cutlass/gemm/kernel/default_gemm_universal_with_visitor.h"
]
callback_name, callback_decl = operation.epilogue_functor.emit(operation)
values["callback_name"] = callback_name
values["callback_decl"] = callback_decl
values["align_c"] = str(operation.C.alignment)
values["element_epilogue"] = DataTypeTag[operation.epilogue_functor.element_epilogue]
if hasattr(operation.epilogue_functor, "epilogue_stages"):
epilogue_stages = operation.epilogue_functor.epilogue_stages
else:
epilogue_stages = 1
values["epilogue_stages"] = str(epilogue_stages)
return SubstituteTemplate(self.gemm_template_kernel_visitor, values)
else:
values["epilogue_functor"] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
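# Hedged usage sketch (not part of this module's API surface; `op` is assumed to be a previously
# constructed GemmOperationUniversal):
#
#   emitter = EmitGemmUniversalInstance()
#   cpp_source = emitter.emit(op)   # instantiated C++ template, returned as a string
#
# The returned string is typically combined with emitter.includes and, for manifest-style emission,
# with instance_template() via SubstituteTemplate.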
class EmitGemmGroupedInstance:
"""Responsible for emitting a CUTLASS template definition"""
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/kernel/gemm_grouped.h",
"cutlass/gemm/kernel/default_gemm_grouped.h",
]
self.gemm_template_kernel = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmGrouped<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${precompute_mode},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = (
self.gemm_template_kernel
+ """
using DeviceKernel = cutlass::gemm::device::GemmGrouped<${operation_name}_base>;
"""
)
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmGrouped<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
# Support built-in epilogue functors or user-defined functions
epilogue_functor = operation.epilogue_functor.emit()
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"epilogue_functor": epilogue_functor,
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"transform_a": ComplexTransformTag[operation.A.complex_transform],
"transform_b": ComplexTransformTag[operation.B.complex_transform],
"precompute_mode": SchedulerModeTag[operation.precompute_mode],
"math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation],
}
if operation.emission_type == EmissionType.Kernel:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
return SubstituteTemplate(gemm_template, values)
| python/cutlass/backend/gemm_operation.py/0 | {
"file_path": "python/cutlass/backend/gemm_operation.py",
"repo_id": "python",
"token_count": 36867
} | 41 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running CONVs
The ``Conv2d`` interface is meant to allow one to easily instantiate, compile, and run
CONV2D operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS CONVs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# A, B, C, and D are torch/numpy/cupy tensor objects
plan = cutlass.op.Conv(A, B, C, D)
plan.run(stride=(1, 1), padding=(0, 0), dilation=(1, 1))
One can also use the interface by specifying data types of operands at construction
and using different tensor objects with these data types at runtime:
.. highlight:: python
.. code-block:: python
# The following is shorthand for:
# cutlass.op.Conv2d(kind="fprop",
# element_A=torch.float32, element_B=torch.float32,
# element_C=torch.float32, element_D=torch.float32,
# element_accumulator=torch.float32)
plan = cutlass.op.Conv2d(kind="fprop", element=torch.float32)
# Tensors are NHWC (channel-last): input (N, H, W, C), weight (K, R, S, C), output (N, P, Q, K)
A0 = torch.rand((16, 32, 32, 64), dtype=torch.float32, device='cuda')
B0 = torch.rand((128, 3, 3, 64), dtype=torch.float32, device='cuda')
C0 = torch.zeros((16, 30, 30, 128), dtype=torch.float32, device='cuda')
D0 = torch.zeros((16, 30, 30, 128), dtype=torch.float32, device='cuda')
plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
A1 = torch.rand((16, 64, 64, 64), dtype=torch.float32, device='cuda')
B1 = torch.rand((128, 3, 3, 64), dtype=torch.float32, device='cuda')
C1 = torch.zeros((16, 62, 62, 128), dtype=torch.float32, device='cuda')
D1 = torch.zeros((16, 62, 62, 128), dtype=torch.float32, device='cuda')
plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
The interface additionally enables one to decouple the compilation of the underlying CUTLASS
kernel from its execution:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
# Do other work...
plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
# Do other work...
plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
Elementwise activation functions are easily fused to the convolution via the interface:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
plan.activation = cutlass.epilogue.relu
Operations can also be run asynchronously:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
args = plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), sync=False)
# Do other work...
args.sync()
"""
from cuda import cuda
from cutlass_library import (
ConvKind,
ConvMode,
DataTypeSize,
IteratorAlgorithm,
OperationKind,
SplitKMode,
StrideSupport,
)
import cutlass
from cutlass import epilogue
from cutlass.backend import compiler
from cutlass.backend.conv2d_operation import Conv2dArguments, Conv2dOperation
from cutlass.backend.reduction_operation import ReductionOperation, ReductionArguments
from cutlass.backend.library import TensorDescription, TileDescription
from cutlass.op.op import OperationBase
from cutlass.shape import Conv2DProblemSize, MatrixCoord
from cutlass.utils import check, datatypes
class Conv2d(OperationBase):
"""
Constructs a ``Conv2d`` object.
The convolution kind (fprop, wgrad, dgrad), the data types of operands A, B, and C,
along with the data type of output D and that used for accumulation, are bound to the ``Conv2d``
object throughout its lifetime -- these are not to be changed after a ``Conv2d`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. The following
constructors are equivalent:
.. highlight:: python
.. code-block:: python
# Use F32 for A, B, C, D, and accumulation in fprop
# Use the generic ``element`` parameter to concisely set all data types for operands to the same values.
Conv2d(kind="fprop", element=cutlass.DataType.f32)
# Explicitly specify the data types to use for A, B, C, and D.
Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32,
element_C=cutlass.DataType.f32, element_D=cutlass.DataType.f32)
# Set the data types and elements from existing tensors. Note that one can use different tensors when
# executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must
# have the same data type as those passed in here).
# A, B, C, and D are torch.Tensor objects of type torch.float32 under the channel-last layout
Conv2d(kind="fprop", A=A, B=B, C=C, D=D)
# Explicitly specify the data type for only some of A, B, C, and D. Unspecified data types will inherit
# those passed in via the generic ``element``
Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_accumulator=cutlass.DataType.f32,
element=cutlass.DataType.f32)
The order of precedence for the setting of the data type for a given operand/output is as follows:
1) If the tensor type is specified (e.g., ``A``), use the data type inferred from this tensor
2) Otherwise, if the data type (e.g., ``element_A``) is specified, use those
3) Otherwise, use the generic values (e.g., ``element``)
:param kind: the convolution kind (i.e. fprop, wgrad, and dgrad)
:type kind: str
:param A: tensor representing data type of operand A
:param B: tensor representing data type of operand B
:param C: tensor representing data type of operand C
:param D: tensor representing data type of operand D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
"""
def __init__(
self, kind="fprop",
A=None, B=None, C=None, D=None, alpha=1.0, beta=0.0,
element=None,
element_A=None, element_B=None, element_C=None, element_D=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None
):
super().__init__(cc=cc, kernel_cc=kernel_cc, operation_kind=OperationKind.Conv2d)
# Verify the kernel cc
if self.current_cc == 90:
# The Conv2d kernel on Hopper (SM90) is currently unsupported
# Revert to use SM80-tagged kernels
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self.specified_kernel_cc = 80
self._reset_options(80)
# The arch is used in testing
self.arch = self.current_cc
self.name = "conv2d" + kind
# The convolution kind. (concept: cutlass_library.library.ConvKind)
self.conv_kind = datatypes.getattr_enum(ConvKind, kind)
# The element types (concept: cutlass library types) of A, B, C, and D
elements = []
layouts = []
# Complete the data types based on user-provided arguments
for elt, tens, name in zip([element_A, element_B, element_C, element_D],
[A, B, C, D],
["A", "B", "C", "D"]):
if elt is not None and tens is not None:
raise Exception(f'Must not specify both element_{name} and tensor {name}')
if elt is None and tens is None and element is None:
raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.')
elt_to_set = None
lay_to_set = None
if tens is not None:
elt_to_set, _ = datatypes.get_datatype_and_layout(tens)
else:
elt_to_set = elt if elt is not None else element
assert elt_to_set is not None
# Currently we only support layout TensorNHWC
lay_to_set = cutlass.LayoutType.TensorNHWC
elements.append(datatypes.library_type(elt_to_set))
layouts.append(lay_to_set)
self._element_a, self._element_b, self._element_c, self._element_d = elements
self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts
self.A, self.B, self.C, self.D, self.alpha, self.beta = A, B, C, D, alpha, beta
if element_accumulator is None:
self._element_accumulator = self._element_c
else:
self._element_accumulator = datatypes.library_type(element_accumulator)
# Default inputs if none is supplied in run()
self.A = A
self.B = B
self.C = C
self.D = D
self.alpha = alpha
self.beta = beta
# We only specify the stride of the swizzling functor here
# The actual swizzling functor is determined in run based on conv_kind and stride
self._swizzling_stride = 1
# Arguments that will be set to default value in _reset_operations
# The default tile_description and op_class are fetched from manifest of cutlass library
self._tile_description = None
self.op_class = None
# The default identity epilogue will be created
self.epilogue_functor = None
self._reset_operations()
# Arguments that will be determined online based on arguments of "run"
# based on stride, input/output channels, alignment, and conv_kind
self._iterator_algorithm = None
self._stride_support = None
def _reset_operations(self, reset_epilogue: bool = True):
# Set the default op class
datatype_comb = (self._element_a, self._element_b, self._element_accumulator)
layout_comb = (self._layout_a, self._layout_b)
self.possible_op_classes = self.options.supporting_opclasses(
self._element_a, self._element_b, self._element_accumulator,
self._layout_a, self._layout_b, self._math_operation
)
if cutlass.OpcodeClass.TensorOp in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.TensorOp
elif cutlass.OpcodeClass.Simt in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.Simt
else:
if self._math_operation is not None:
math_op_str = f' and math operation {self._math_operation}'
else:
math_op_str = ''
raise Exception(f'No kernel configuration found for supported data type and layout '
f'combination {datatype_comb}x{layout_comb}{math_op_str}')
if reset_epilogue:
self._reset_epilogue_functor_activation(epilogue.identity)
self.alignment_pref_A = min(
128 // DataTypeSize[self._element_a], max(self.possible_operations.alignments("A")))
self.alignment_pref_B = min(
128 // DataTypeSize[self._element_b], max(self.possible_operations.alignments("B")))
self.alignment_pref_C = min(
128 // DataTypeSize[self._element_c], max(self.possible_operations.alignments("C")))
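# Worked example of the preference computed above (assuming f16 operands): DataTypeSize[f16] is 16,
# so 128 // 16 = 8 elements is the widest 128-bit access, capped by the maximum alignment that the
# enumerated operations actually support.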
#
# Tile description Related
#
@property
def tile_description(self) -> TileDescription:
"""
Returns the tile description
"""
return self._tile_description
@tile_description.setter
def tile_description(
self, td=None):
"""
Set the tile description
:param td: tile description
:type td: cutlass.backend.TileDescription, or a dict with keys
{
"threadblock_shape": [int, int, int],
"warp_count": [int, int, int],
"stages": int,
"instruction_shape": [int, int, int] (optional),
"cluster_shape": [int, int, int] (optional)
}
"""
if td is None:
return
if isinstance(td, dict):
if self._tile_description is None:
op = self.possible_operations.default_operation(self._math_operation)
self._tile_description = datatypes.td_from_profiler_op(op)
if "cluster_shape" in td.keys():
if td["cluster_shape"] != [1, 1, 1]:
cutlass.logger.warning("Conv2d currently only support 'cluster_shape'=[1, 1, 1]'.")
td["cluster_shape"] = [1, 1, 1]
td = self._tile_description.clone_and_update(td)
valid, msg = self._valid_tile_description(td)
if valid:
self._tile_description = td
else:
raise Exception(msg)
def _valid_tile_description(self, td: TileDescription) -> tuple:
"""
Checks whether the provided tile description is valid for the given compute capability. At present,
this checks the following:
- Does the tile description use a number of stages supported by the compute capability in question?
- Does the tile size requested fit within shared memory?
- Are cluster dimensions outside the valid range requested for a given architecture (e.g.,
more non-unit cluster dimensions for pre-SM90 architectures)?
- Is the kernel schedule being used supported on the architecture in question?
:param td: tile description to validate
:type td: cutlass.backend.TileDescription
:return: tuple in which the first element is a bool indicating that the tile description is valid
and the second element is a string providing an optional error message.
:rtype: tuple
"""
valid, msg = check.valid_stage_count(self.cc, self.current_cc, td)
if not valid:
return (valid, msg)
valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape)
if not valid:
return (valid, msg)
return valid, msg
def tile_descriptions(self) -> list:
"""
Returns a list of valid tile descriptions for the operations
:returns: list of valid tile descriptions for the operations
:rtype: list
"""
descriptions = []
description_str = []
for op in self.possible_operations.all_operations:
td = datatypes.td_from_profiler_op(op)
if self._math_operation is not None:
if td.math_instruction.math_operation != self._math_operation:
continue
if str(td) not in description_str:
description_str.append(str(td))
descriptions.append(td)
return descriptions
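# Hedged usage sketch (plan construction shown for illustration only):
#
#   plan = cutlass.op.Conv2d(kind="fprop", element=torch.float32)
#   tds = plan.tile_descriptions()
#   plan.tile_description = tds[0]   # or a dict such as {"threadblock_shape": [128, 128, 32], "stages": 3}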
#
# Swizzling functor Related
#
@property
def swizzling_stride(self):
"""
Returns the stride of swizzling currently being used by the Conv2d
:return: swizzing stride
"""
return self._swizzling_stride
@swizzling_stride.setter
def swizzling_stride(self, stride: int):
"""
Sets the stride used by the swizzling functor
"""
if not isinstance(stride, int):
raise Exception(f"Expect integer (1, 2, 4, 8), got {stride}")
self._swizzling_stride = stride
def _propose_swizzling_functor(self, stride):
"""
Automatically propose the swizzling functor based on the stride
"""
if self.conv_kind == ConvKind.Dgrad:
if stride[0] != 1 or stride[1] != 1:
return getattr(cutlass.swizzle, f"StridedDgradIdentitySwizzle{self._swizzling_stride}")
return getattr(cutlass.swizzle, f"IdentitySwizzle{self._swizzling_stride}")
#
# Iterator Algorithm Related
#
@property
def iterator_algorithm(self) -> IteratorAlgorithm:
"""
Returns the iterator algorithm
"""
return self._iterator_algorithm
@iterator_algorithm.setter
def iterator_algorithm(self, alg: str):
"""
Sets the iterator algorithm
:param alg: The iterator algorithm
:type alg: str, options: "analytic", "optimized", "few_channels", and "fixed_channels"
"""
iterator_alg = datatypes.getattr_enum(IteratorAlgorithm, alg)
# Check if the iterator algorithm is valid
if iterator_alg in [IteratorAlgorithm.FewChannels, IteratorAlgorithm.FixedChannels] and self.conv_kind != ConvKind.Fprop:
raise Exception(f"{self.conv_kind} does not support iterator algorithm {alg}.")
self._iterator_algorithm = iterator_alg
def _propose_iterator_algorithm(self, problem_size, alignment_a, alignment_b) -> IteratorAlgorithm:
"""
Propose a valid iterator algorithm based on problem size and alignment
"""
if self.conv_kind == ConvKind.Fprop:
# Check whether the fixed channel is applicable
if problem_size.C == alignment_a:
return IteratorAlgorithm.FixedChannels
elif (problem_size.C % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32):
return IteratorAlgorithm.Optimized
else:
return IteratorAlgorithm.Analytic
elif self.conv_kind == ConvKind.Dgrad:
if (problem_size.K % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32 and
problem_size.C % alignment_b == 0):
return IteratorAlgorithm.Optimized
else:
return IteratorAlgorithm.Analytic
elif self.conv_kind == ConvKind.Wgrad:
if (problem_size.K % alignment_a == 0 and
problem_size.C % alignment_b == 0):
return IteratorAlgorithm.Optimized
else:
return IteratorAlgorithm.Analytic
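# Worked example of the heuristic above (fprop, alignment_a = 8): an input with C == 8 selects
# FixedChannels; C == 64 with a 3x3 filter selects Optimized; anything else falls back to Analytic.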
def _validate_iterator_algorithm(self, iterator_algorithm, problem_size, alignment_a, alignment_b) -> bool:
"""
Validate whether the user-provided iterator algorithm works for the given problem size
"""
if self.conv_kind == ConvKind.Fprop:
if iterator_algorithm == IteratorAlgorithm.FixedChannels:
return problem_size.C == alignment_a
elif iterator_algorithm == IteratorAlgorithm.Optimized:
return (problem_size.C % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32)
elif iterator_algorithm == IteratorAlgorithm.FewChannels:
return problem_size.C % alignment_a == 0
elif self.conv_kind == ConvKind.Dgrad:
if iterator_algorithm == IteratorAlgorithm.Optimized:
return (problem_size.K % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32 and
problem_size.C % alignment_b == 0)
elif self.conv_kind == ConvKind.Wgrad:
if iterator_algorithm == IteratorAlgorithm.Optimized:
return (problem_size.K % alignment_a == 0 and
problem_size.C % alignment_b == 0)
return True
#
# Stride Support Related
#
def _propose_stride_support(self, stride):
if self.conv_kind == ConvKind.Dgrad:
if stride[0] == 1 and stride[1] == 1:
return StrideSupport.Unity
return StrideSupport.Strided
#
# Construct and Compilation
#
def construct(
self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
iterator_algorithm: IteratorAlgorithm = None,
stride_support = None, swizzling_functor: cutlass.swizzle = None,
epilogue_functor=None) -> cutlass.backend.Conv2dOperation:
"""
Constructs a ``cutlass.backend.Conv2dOperation`` based on the input parameters and current
kernel specification of the ``Conv2d`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param iterator_algorithm: the iterator algorithm used
:type iterator_algorithm: cutlass_library.library.IteratorAlgorithm
:param stride_support: the stride support of dgrad
:type stride_support: cutlass_library.library.StrideSupport
:param swizzling_functor: the swizzling functor
:type swizzling_functor: cutlass.swizzle
:param epilogue_functor: the epilogue functor
:return: operation that was constructed
:rtype: cutlass.backend.Conv2dOperation
"""
# Get alignment
alignment_A = check.alignment_or_default(alignment_A, self.alignment_pref_A)
alignment_B = check.alignment_or_default(alignment_B, self.alignment_pref_B)
alignment_C = check.alignment_or_default(alignment_C, self.alignment_pref_C)
tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A)
tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B)
tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C)
if tile_description is None:
if self.tile_description is not None:
tile_description = self.tile_description
else:
op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
if iterator_algorithm is None:
# If the iterator algorithm is already set
if self.iterator_algorithm is not None:
iterator_algorithm = self.iterator_algorithm
else:
# Otherwise, we conservatively use the analytic iterator for correctness
iterator_algorithm = IteratorAlgorithm.Analytic
if stride_support is None:
# If the stride support is already set
if self._stride_support is not None:
stride_support = self._stride_support
else:
# Otherwise, we assume strided
stride_support = StrideSupport.Strided
if swizzling_functor is None:
# Otherwise, propose a default swizzling functor, conservatively assuming a strided case
swizzling_functor = self._propose_swizzling_functor(stride=(2, 2))
if epilogue_functor is None:
if self.epilogue_functor is not None:
epilogue_functor = self.epilogue_functor
else:
epilogue_functor = self._create_epilogue_functor_activation(self._activation)
# Reset the alignment of the epilogue functor
epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, epilogue_functor)
operation = Conv2dOperation(
conv_kind=self.conv_kind,
iterator_algorithm=iterator_algorithm,
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
stride_support=stride_support,
epilogue_functor=epilogue_functor,
swizzling_functor=swizzling_functor,
)
return operation
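# Hedged usage sketch (the data types below are assumptions for illustration):
#
#   plan = Conv2d(kind="fprop", element=cutlass.DataType.f16,
#                 element_accumulator=cutlass.DataType.f32)
#   op = plan.construct()              # defaults for tile description, alignments, and iterators
#   compiler.add_module([op])          # same registration path that compile() uses below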
def compile(self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
iterator_algorithm: IteratorAlgorithm = None,
stride_support = None, swizzling_functor: cutlass.swizzle = None,
epilogue_functor = None, print_module: bool = False) -> cutlass.backend.Conv2dOperation:
"""
Emits and compiles the kernel currently specified. If ``tile_description`` and any
of the ``alignment`` parameters are set, the kernel will be chosen using this
tile description and alignments. Otherwise, a default tile description and alignment
will be used.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param iterator_algorithm: the iterator algorithm used
:type iterator_algorithm: cutlass_library.library.IteratorAlgorithm
:param stride_support: the stride support of dgrad
:type stride_support: cutlass_library.library.StrideSupport
:param swizzling_functor: the swizzling functor
:type swizzling_functor: cutlass.swizzle
:param epilogue_functor: the epilogue functor
:return: operation that was compiled
:rtype: cutlass.backend.Conv2dOperation
"""
self.operation = self.construct(
tile_description, alignment_A, alignment_B, alignment_C,
iterator_algorithm, stride_support, swizzling_functor, epilogue_functor)
if print_module:
print(self.operation.rt_module.emit())
compiler.add_module([self.operation,])
return self.operation
#
# Run Related
#
def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name):
"""
Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception
is raised if it does not.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_type: data type for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
"""
dtype, _ = datatypes.get_datatype_and_layout(tensor)
if dtype != ref_type:
raise Exception(f'Tensor {name} with type {dtype} '
f'does not match the expected type {ref_type}.')
def _get_and_verify_conv_problem_size(self, A, B, C, stride, padding, dilation):
if self.conv_kind == ConvKind.Fprop:
input = A
weight = B
output = C
output_tensor = "C"
elif self.conv_kind == ConvKind.Dgrad:
output = A
weight = B
input = C
output_tensor = "A"
elif self.conv_kind == ConvKind.Wgrad:
output = A
input = B
weight = C
output_tensor = "A"
else:
raise Exception(f"Convolution kind {self.conv_kind} is not supported")
N_, H_, W_, C_ = datatypes.get_tensor_shape(input, op="CONV")
K_, R_, S_, _ = datatypes.get_tensor_shape(weight, op="CONV")
_, P_, Q_, _ = datatypes.get_tensor_shape(output, op="CONV")
problem_size = Conv2DProblemSize(
N_, H_, W_, C_,
K_, R_, S_, C_,
padding[0], padding[1],
stride[0], stride[1],
dilation[0], dilation[1],
ConvMode.CrossCorrelation,
1, 1
)
if P_ != problem_size.P or Q_ != problem_size.Q:
raise Exception(
f"Tensor {output_tensor} size should be ({N_}, {problem_size.P}, {problem_size.Q}, {K_}), got ({N_}, {P_}, {Q_}, {K_})")
return problem_size
def run(self, A=None, B=None, C=None, D=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1),
alpha=None, beta=None,
split_k=("serial", 1), sync: bool = True,
print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
"""
Runs the kernel currently specified. If it has not already been, the kernel is emitted and
compiled. Tensors holding operands and outputs of the kernel are sourced either from the
``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta``
parameters provided in the call, or from those
passed in on the construction of this object -- one of the two must be specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param stride: (stride_h, stride_w) describing the convolution stride. Default: (1, 1)
:param padding: (pad_h, pad_w) describing the convolution padding. Default: (0, 0)
:param dilation: (dilation_h, dilation_w) describing the dilation of convolution. Default: (1, 1)
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param split_k: a tuple (split_k_mode, split_k_slices)
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
:return: arguments passed in to the kernel
:rtype: cutlass.backend.Conv2dArguments
"""
super().run_setup()
A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A")
B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B")
C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C")
D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D")
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
# handle the case when there is no C
if C is None:
if beta != 0:
raise Exception(f"With beta {beta} != 0, C has to be provided.")
else:
C = D
# Construct problem size based on input
# It also verifies whether the A, B, C, D, stride, padding, and dilation are matching
problem_size = self._get_and_verify_conv_problem_size(A, B, C, stride, padding, dilation)
# Propose stride support based on input
stride_support = self._propose_stride_support(stride)
# Propose swizzling functor
swizzling_functor = self._propose_swizzling_functor(stride)
shape_a = datatypes.get_tensor_shape(A, op="CONV")
shape_b = datatypes.get_tensor_shape(B, op="CONV")
shape_c = datatypes.get_tensor_shape(C, op="CONV")
# Get the alignment
alignment_a = self.possible_operations.find_alignment(shape_a, self._layout_a, operand="A")
alignment_b = self.possible_operations.find_alignment(shape_b, self._layout_b, operand="B")
alignment_c = self.possible_operations.find_alignment(shape_c, self._layout_c, operand="C")
alignment_a = check.update_alignment(alignment_a, self.alignment_pref_A)
alignment_b = check.update_alignment(alignment_b, self.alignment_pref_B)
alignment_c = check.update_alignment(alignment_c, self.alignment_pref_C)
# Propose iterator algorithm based on input
if self._iterator_algorithm is None:
# Propose a default iterator algorithm based on the problem size
iterator_algorithm = self._propose_iterator_algorithm(problem_size, alignment_a, alignment_b)
else:
if (self._validate_iterator_algorithm(self._iterator_algorithm, problem_size, alignment_a, alignment_b)):
iterator_algorithm = self._iterator_algorithm
else:
raise Exception(f"Iterator algorithm {self._iterator_algorithm} is invalid for current problem.")
epilogue_args = [alpha, beta]
if hasattr(self, "_activation_args"):
if isinstance(self._activation_args, list):
epilogue_args += self._activation_args
else:
epilogue_args.append(self._activation_args)
if split_k[0] == "parallel" and split_k[1] > 1:
epilogue_functor = self._create_epilogue_functor_activation(epilogue.identity)
else:
epilogue_functor = self.epilogue_functor
# The alignment is determined by the iterator function (I believe)
self.compile(tile_description=self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, iterator_algorithm=iterator_algorithm, stride_support=stride_support,
swizzling_functor=swizzling_functor, epilogue_functor=epilogue_functor, print_module=print_module)
# Create reduction operation for parallel split-k
if split_k[0] == "parallel" and split_k[1] > 1:
epilogue_functor_reduction = self._reset_epilogue_functor_alignment(alignment_c, self.epilogue_functor)
self.reduction_operation = ReductionOperation(
shape=MatrixCoord(4, 32 * alignment_c), C=self.operation.C,
element_accumulator=self._element_accumulator,
element_compute=self._element_accumulator,
epilogue_functor=epilogue_functor_reduction,
count=alignment_c
)
if print_module:
print(self.reduction_operation.rt_module.emit())
compiler.add_module([self.reduction_operation,])
arguments = Conv2dArguments(
operation=self.operation, problem_size=problem_size,
A=A, B=B, C=C, D=D,
output_op=self.operation.epilogue_type(*epilogue_args),
split_k_mode=datatypes.getattr_enum(SplitKMode, split_k[0]),
split_k_slices=split_k[1],
stream=stream
)
self.operation.run(arguments)
if split_k[0] == "parallel" and split_k[1] > 1:
implicit_gemm_size = arguments.problem_size.implicit_gemm_size(self.conv_kind)
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[implicit_gemm_size.m, implicit_gemm_size.n],
partitions=split_k[1],
workspace=arguments.ptr_D,
destination=D,
source=C,
output_op=self.reduction_operation.epilogue_type(*epilogue_args),
stream=stream
)
self.reduction_operation.run(reduction_arguments)
if sync:
if split_k[0] == "parallel" and split_k[1] > 1:
reduction_arguments.sync()
# Free memory allocated by args because we are not
# calling `arguments.sync()` in this case (which will free memory)
arguments.free()
else:
arguments.sync()
return arguments
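# Hedged example of invoking run() with parallel split-k (tensors assumed to exist and be NHWC):
#
#   args = plan.run(A, B, C, D, stride=(2, 2), padding=(1, 1), split_k=("parallel", 3))
#
# In that case partial results are written to workspace memory and then combined by the
# ReductionOperation constructed above.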
#
# Helper functions
#
@staticmethod
def output_size(input_size, weight_size, padding, stride, dilation):
problem_size = Conv2DProblemSize(
*input_size,
*weight_size,
padding[0], padding[1],
stride[0], stride[1],
dilation[0], dilation[1],
ConvMode.CrossCorrelation,
1, 1
)
return (problem_size.N, problem_size.P, problem_size.Q, problem_size.K)
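# Worked example (standard cross-correlation sizing, which Conv2DProblemSize is assumed to follow):
# input (N, H, W, C) = (16, 32, 32, 64), weight (K, R, S, C) = (128, 3, 3, 64), padding (0, 0),
# stride (1, 1), dilation (1, 1) gives P = Q = (32 + 0 - 3) // 1 + 1 = 30, i.e. output (16, 30, 30, 128).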
#
# Easy to use interfaces for fprop, wgrad, and dgrad
#
class Conv2dFprop(Conv2d):
def __init__(
self,
input=None, weight=None, C=None, output=None, alpha=1, beta=0,
element=None,
element_input=None, element_weight=None, element_C=None, element_output=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = input, weight, output
element_A, element_B, element_D = element_input, element_weight, element_output
super().__init__(
"fprop", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(
self, input=None, weight=None, C=None, output=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
A, B, D = input, weight, output
return super().run(
A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module, stream)
class Conv2dDgrad(Conv2d):
def __init__(
self,
grad_output=None, weight=None, C=None, grad_input=None, alpha=1, beta=0,
element=None,
element_grad_output=None, element_weight=None, element_C=None, element_grad_input=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = grad_output, weight, grad_input
element_A, element_B, element_D = element_grad_output, element_weight, element_grad_input
super().__init__(
"dgrad", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(self, grad_output=None, weight=None, C=None, grad_input=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
#
A, B, D = grad_output, weight, grad_input
return super().run(
A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module, stream)
class Conv2dWgrad(Conv2d):
def __init__(
self,
grad_output=None, input=None, C=None, grad_weight=None, alpha=1, beta=0,
element=None,
element_grad_output=None, element_input=None, element_C=None, element_grad_weight=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = grad_output, input, grad_weight
element_A, element_B, element_D = element_grad_output, element_input, element_grad_weight
super().__init__(
"wgrad", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(self, grad_output=None, input=None, C=None, grad_weight=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False,
stream: cuda.CUstream = cuda.CUstream(0)) -> Conv2dArguments:
#
A, B, D = grad_output, input, grad_weight
return super().run(
A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module, stream)
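# Hedged usage sketch of the convenience wrappers above (tensor names are assumptions; tensors are
# channel-last NHWC):
#
#   fprop = Conv2dFprop(element=torch.float16, element_accumulator=torch.float32)
#   args = fprop.run(input=x, weight=w, C=c, output=y,   # x: NHWC, w: KRSC, c/y: NPQK
#                    stride=(1, 1), padding=(1, 1))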
| python/cutlass/op/conv.py/0 | {
"file_path": "python/cutlass/op/conv.py",
"repo_id": "python",
"token_count": 18073
} | 42 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Data types and tags used for emitting CUTLASS C++ kernels
"""
import enum
import re
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
###################################################################################################
#
class GeneratorTarget(enum.Enum):
Library = enum_auto()
#
GeneratorTargetNames = {
GeneratorTarget.Library: 'library'
}
#
###################################################################################################
#
class DataType(enum.Enum):
void = enum_auto() # primarily used to disable C tensor for epilogues
b1 = enum_auto()
u4 = enum_auto()
u8 = enum_auto()
u16 = enum_auto()
u32 = enum_auto()
u64 = enum_auto()
s4 = enum_auto()
s8 = enum_auto()
s16 = enum_auto()
s32 = enum_auto()
s64 = enum_auto()
e4m3 = enum_auto()
e5m2 = enum_auto()
f16 = enum_auto()
bf16 = enum_auto()
f32 = enum_auto()
tf32 = enum_auto()
f64 = enum_auto()
cf16 = enum_auto()
cbf16 = enum_auto()
cf32 = enum_auto()
ctf32 = enum_auto()
cf64 = enum_auto()
cs4 = enum_auto()
cs8 = enum_auto()
cs16 = enum_auto()
cs32 = enum_auto()
cs64 = enum_auto()
cu4 = enum_auto()
cu8 = enum_auto()
cu16 = enum_auto()
cu32 = enum_auto()
cu64 = enum_auto()
invalid = enum_auto()
#
ShortDataTypeNames = {
DataType.s32: 'i',
DataType.e4m3: 'e4m3',
DataType.e5m2: 'e5m2',
DataType.f16: 'h',
DataType.f32: 's',
DataType.f64: 'd',
DataType.cf32: 'c',
DataType.cf64: 'z',
}
#
DataTypeNames = {
DataType.void: "void",
DataType.b1: "b1",
DataType.u4: "u4",
DataType.u8: "u8",
DataType.u16: "u16",
DataType.u32: "u32",
DataType.u64: "u64",
DataType.s4: "s4",
DataType.s8: "s8",
DataType.s16: "s16",
DataType.s32: "s32",
DataType.s64: "s64",
  DataType.e4m3: "e4m3",
  DataType.e5m2: "e5m2",
DataType.f16: "f16",
DataType.bf16: "bf16",
DataType.f32: "f32",
DataType.tf32: "tf32",
DataType.f64: "f64",
DataType.cf16: "cf16",
DataType.cbf16: "cbf16",
DataType.cf32: "cf32",
DataType.ctf32: "ctf32",
DataType.cf64: "cf64",
DataType.cu4: "cu4",
DataType.cu8: "cu8",
DataType.cu16: "cu16",
DataType.cu32: "cu32",
DataType.cu64: "cu64",
DataType.cs4: "cs4",
DataType.cs8: "cs8",
DataType.cs16: "cs16",
DataType.cs32: "cs32",
DataType.cs64: "cs64",
}
DataTypeTag = {
DataType.void: "void",
DataType.b1: "cutlass::uint1b_t",
DataType.u4: "cutlass::uint4b_t",
DataType.u8: "uint8_t",
DataType.u16: "uint16_t",
DataType.u32: "uint32_t",
DataType.u64: "uint64_t",
DataType.s4: "cutlass::int4b_t",
DataType.s8: "int8_t",
DataType.s16: "int16_t",
DataType.s32: "int32_t",
DataType.s64: "int64_t",
  DataType.e4m3: "cutlass::float_e4m3_t",
  DataType.e5m2: "cutlass::float_e5m2_t",
DataType.f16: "cutlass::half_t",
DataType.bf16: "cutlass::bfloat16_t",
DataType.f32: "float",
DataType.tf32: "cutlass::tfloat32_t",
DataType.f64: "double",
DataType.cf16: "cutlass::complex<cutlass::half_t>",
DataType.cbf16: "cutlass::complex<cutlass::bfloat16_t>",
DataType.cf32: "cutlass::complex<float>",
DataType.ctf32: "cutlass::complex<cutlass::tfloat32_t>",
DataType.cf64: "cutlass::complex<double>",
DataType.cu4: "cutlass::complex<cutlass::uint4b_t>",
DataType.cu8: "cutlass::complex<cutlass::uint8_t>",
DataType.cu16: "cutlass::complex<cutlass::uint16_t>",
DataType.cu32: "cutlass::complex<cutlass::uint32_t>",
DataType.cu64: "cutlass::complex<cutlass::uint64_t>",
DataType.cs4: "cutlass::complex<cutlass::int4b_t>",
DataType.cs8: "cutlass::complex<cutlass::int8_t>",
DataType.cs16: "cutlass::complex<cutlass::int16_t>",
DataType.cs32: "cutlass::complex<cutlass::int32_t>",
DataType.cs64: "cutlass::complex<cutlass::int64_t>",
}
DataTypeSize = {
DataType.void: 0,
DataType.b1: 1,
DataType.u4: 4,
DataType.u8: 8,
DataType.u16: 16,
DataType.u32: 32,
DataType.u64: 64,
DataType.s4: 4,
DataType.s8: 8,
DataType.s16: 16,
DataType.s32: 32,
DataType.s64: 64,
DataType.e4m3: 8,
DataType.e5m2: 8,
DataType.f16: 16,
DataType.bf16: 16,
DataType.f32: 32,
DataType.tf32: 32,
DataType.f64: 64,
DataType.cf16: 32,
DataType.cbf16: 32,
DataType.cf32: 64,
  DataType.ctf32: 64,   # complex<tf32> occupies two 32-bit elements, matching the other complex types
DataType.cf64: 128,
DataType.cu4: 8,
DataType.cu8: 16,
DataType.cu16: 32,
DataType.cu32: 64,
DataType.cu64: 128,
DataType.cs4: 8,
DataType.cs8: 16,
DataType.cs16: 32,
DataType.cs32: 64,
DataType.cs64: 128,
}
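#
# Example (illustrative only): DataTypeSize is expressed in bits, so the widest
# 128-bit-aligned vector access for a given type holds 128 // DataTypeSize[type]
# elements. This hypothetical helper is a sketch, not part of the library.
def _max_alignment_example():
  return {dt: 128 // DataTypeSize[dt] for dt in (DataType.f16, DataType.tf32, DataType.s8)}
  # -> {DataType.f16: 8, DataType.tf32: 4, DataType.s8: 16}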
###################################################################################################
#
class BlasMode(enum.Enum):
symmetric = enum_auto()
hermitian = enum_auto()
#
BlasModeTag = {
BlasMode.symmetric: 'cutlass::BlasMode::kSymmetric',
BlasMode.hermitian: 'cutlass::BlasMode::kHermitian',
}
#
class ComplexTransform(enum.Enum):
none = enum_auto()
conj = enum_auto()
#
ComplexTransformTag = {
ComplexTransform.none: 'cutlass::ComplexTransform::kNone',
ComplexTransform.conj: 'cutlass::ComplexTransform::kConjugate',
}
# Used for cutlass3x complex kernel collective mainloop builder instantiation
ComplexTransformTag3x = {
ComplexTransform.none: 'cute::identity',
ComplexTransform.conj: 'cute::conjugate',
}
#
RealComplexBijection = [
(DataType.f16, DataType.cf16),
(DataType.f32, DataType.cf32),
(DataType.f64, DataType.cf64),
]
#
def is_complex(data_type):
for r, c in RealComplexBijection:
if data_type == c:
return True
return False
#
def get_complex_from_real(real_type):
for r, c in RealComplexBijection:
if real_type == r:
return c
return DataType.invalid
#
def get_real_from_complex(complex_type):
for r, c in RealComplexBijection:
if complex_type == c:
return r
return DataType.invalid
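#
# Example (illustrative only): round-tripping between real and complex data types
# via the bijection above. This hypothetical helper is not part of the library.
def _complex_type_example():
  assert get_complex_from_real(DataType.f32) == DataType.cf32
  assert get_real_from_complex(DataType.cf32) == DataType.f32
  assert is_complex(DataType.cf16) and not is_complex(DataType.f16)
  return DataTypeTag[get_complex_from_real(DataType.f32)]   # -> 'cutlass::complex<float>'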
#
class ComplexMultiplyOp(enum.Enum):
multiply_add = enum_auto()
gaussian = enum_auto()
###################################################################################################
#
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
multiply_add_mixed_input_upcast = enum_auto()
xor_popc = enum_auto()
and_popc = enum_auto()
multiply_add_fast_bf16 = enum_auto()
multiply_add_fast_f16 = enum_auto()
multiply_add_fast_f32 = enum_auto()
multiply_add_complex_fast_f32 = enum_auto()
multiply_add_complex = enum_auto()
multiply_add_complex_gaussian = enum_auto()
multiply_add_fast_accum = enum_auto()
#
MathOperationTag = {
MathOperation.multiply_add: 'cutlass::arch::OpMultiplyAdd',
MathOperation.multiply_add_saturate: 'cutlass::arch::OpMultiplyAddSaturate',
MathOperation.multiply_add_mixed_input_upcast: 'cutlass::arch::OpMultiplyAddMixedInputUpcast',
MathOperation.xor_popc: 'cutlass::arch::OpXorPopc',
MathOperation.and_popc: 'cutlass::arch::OpAndPopc',
MathOperation.multiply_add_fast_bf16: 'cutlass::arch::OpMultiplyAddFastBF16',
MathOperation.multiply_add_fast_f16: 'cutlass::arch::OpMultiplyAddFastF16',
MathOperation.multiply_add_fast_f32: 'cutlass::arch::OpMultiplyAddFastF32',
MathOperation.multiply_add_complex_fast_f32: 'cutlass::arch::OpMultiplyAddComplexFastF32',
MathOperation.multiply_add_complex: 'cutlass::arch::OpMultiplyAddComplex',
MathOperation.multiply_add_complex_gaussian: 'cutlass::arch::OpMultiplyAddGaussianComplex',
MathOperation.multiply_add_fast_accum: 'cutlass::arch::OpMultiplyAddFastAccum',
}
###################################################################################################
#
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
ColumnMajorInterleaved2 = enum_auto()
RowMajorInterleaved2 = enum_auto()
ColumnMajorInterleaved32 = enum_auto()
RowMajorInterleaved32 = enum_auto()
ColumnMajorInterleaved64 = enum_auto()
RowMajorInterleaved64 = enum_auto()
TensorNWC = enum_auto()
TensorNHWC = enum_auto()
TensorNDHWC = enum_auto()
TensorNCHW = enum_auto()
TensorNGHWC = enum_auto()
TensorNC32HW32 = enum_auto()
TensorNC64HW64 = enum_auto()
TensorC32RSK32 = enum_auto()
TensorC64RSK64 = enum_auto()
TensorKCS = enum_auto()
TensorKCSR = enum_auto()
TensorKCSRT = enum_auto()
#
LayoutTag = {
LayoutType.ColumnMajor: 'cutlass::layout::ColumnMajor',
LayoutType.RowMajor: 'cutlass::layout::RowMajor',
LayoutType.ColumnMajorInterleaved2: 'cutlass::layout::ColumnMajorInterleaved<2>',
LayoutType.RowMajorInterleaved2: 'cutlass::layout::RowMajorInterleaved<2>',
LayoutType.ColumnMajorInterleaved32: 'cutlass::layout::ColumnMajorInterleaved<32>',
LayoutType.RowMajorInterleaved32: 'cutlass::layout::RowMajorInterleaved<32>',
LayoutType.ColumnMajorInterleaved64: 'cutlass::layout::ColumnMajorInterleaved<64>',
LayoutType.RowMajorInterleaved64: 'cutlass::layout::RowMajorInterleaved<64>',
LayoutType.TensorNWC: 'cutlass::layout::TensorNWC',
LayoutType.TensorNHWC: 'cutlass::layout::TensorNHWC',
LayoutType.TensorNDHWC: 'cutlass::layout::TensorNDHWC',
LayoutType.TensorNCHW: 'cutlass::layout::TensorNCHW',
LayoutType.TensorNGHWC: 'cutlass::layout::TensorNGHWC',
LayoutType.TensorNC32HW32: 'cutlass::layout::TensorNCxHWx<32>',
LayoutType.TensorC32RSK32: 'cutlass::layout::TensorCxRSKx<32>',
LayoutType.TensorNC64HW64: 'cutlass::layout::TensorNCxHWx<64>',
LayoutType.TensorC64RSK64: 'cutlass::layout::TensorCxRSKx<64>',
LayoutType.TensorKCS: 'cutlass::layout::TensorKCS',
LayoutType.TensorKCSR: 'cutlass::layout::TensorKCSR',
LayoutType.TensorKCSRT: 'cutlass::layout::TensorKCSRT'
}
#
TransposedLayout = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor,
LayoutType.ColumnMajorInterleaved2: LayoutType.RowMajorInterleaved2,
LayoutType.RowMajorInterleaved2: LayoutType.ColumnMajorInterleaved2,
LayoutType.ColumnMajorInterleaved32: LayoutType.RowMajorInterleaved32,
LayoutType.RowMajorInterleaved32: LayoutType.ColumnMajorInterleaved32,
LayoutType.ColumnMajorInterleaved64: LayoutType.RowMajorInterleaved64,
LayoutType.RowMajorInterleaved64: LayoutType.ColumnMajorInterleaved64,
LayoutType.TensorNHWC: LayoutType.TensorNHWC
}
#
ShortLayoutTypeNames = {
LayoutType.ColumnMajor: 'n',
LayoutType.ColumnMajorInterleaved2: 'n2',
LayoutType.ColumnMajorInterleaved32: 'n32',
LayoutType.ColumnMajorInterleaved64: 'n64',
LayoutType.RowMajor: 't',
LayoutType.RowMajorInterleaved2: 't2',
LayoutType.RowMajorInterleaved32: 't32',
LayoutType.RowMajorInterleaved64: 't64',
LayoutType.TensorNWC: 'nwc',
LayoutType.TensorNHWC: 'nhwc',
LayoutType.TensorNDHWC: 'ndhwc',
LayoutType.TensorNCHW: 'nchw',
LayoutType.TensorNGHWC: 'nghwc',
LayoutType.TensorNC32HW32: 'nc32hw32',
LayoutType.TensorNC64HW64: 'nc64hw64',
LayoutType.TensorC32RSK32: 'c32rsk32',
LayoutType.TensorC64RSK64: 'c64rsk64',
LayoutType.TensorKCS: 'kcs',
LayoutType.TensorKCSR: 'kcsr',
LayoutType.TensorKCSRT: 'kcsrt'
}
#
ShortComplexLayoutNames = {
(LayoutType.ColumnMajor, ComplexTransform.none): 'n',
(LayoutType.ColumnMajor, ComplexTransform.conj): 'c',
(LayoutType.RowMajor, ComplexTransform.none): 't',
(LayoutType.RowMajor, ComplexTransform.conj): 'h'
}
###################################################################################################
class KernelScheduleType(enum.Enum):
ScheduleAuto = enum_auto()
Multistage = enum_auto()
CpAsyncWarpSpecialized = enum_auto()
CpAsyncWarpSpecializedPingpong = enum_auto()
CpAsyncWarpSpecializedCooperative = enum_auto()
Tma = enum_auto()
TmaWarpSpecialized = enum_auto()
TmaWarpSpecializedPingpong = enum_auto()
TmaWarpSpecializedCooperative = enum_auto()
TmaWarpSpecializedFP8FastAccum = enum_auto()
TmaWarpSpecializedCooperativeFP8FastAccum = enum_auto()
TmaWarpSpecializedPingpongFP8FastAccum = enum_auto()
ImplicitTmaWarpSpecializedSm90 = enum_auto()
#
KernelScheduleTag = {
KernelScheduleType.ScheduleAuto: 'cutlass::gemm::collective::KernelScheduleAuto',
KernelScheduleType.Multistage: 'cutlass::gemm::KernelMultistage',
KernelScheduleType.CpAsyncWarpSpecialized: 'cutlass::gemm::KernelCpAsyncWarpSpecialized',
KernelScheduleType.CpAsyncWarpSpecializedPingpong: 'cutlass::gemm::KernelCpAsyncWarpSpecializedPingpong',
KernelScheduleType.CpAsyncWarpSpecializedCooperative: 'cutlass::gemm::KernelCpAsyncWarpSpecializedCooperative',
KernelScheduleType.Tma: 'cutlass::gemm::KernelTma',
KernelScheduleType.TmaWarpSpecialized: 'cutlass::gemm::KernelTmaWarpSpecialized',
KernelScheduleType.TmaWarpSpecializedPingpong: 'cutlass::gemm::KernelTmaWarpSpecializedPingpong',
KernelScheduleType.TmaWarpSpecializedCooperative: 'cutlass::gemm::KernelTmaWarpSpecializedCooperative',
KernelScheduleType.TmaWarpSpecializedFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum',
KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedCooperativeFP8FastAccum',
KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum',
KernelScheduleType.ImplicitTmaWarpSpecializedSm90: 'cutlass::conv::KernelImplicitTmaWarpSpecializedSm90',
}
#
KernelScheduleSuffixes = {
KernelScheduleType.ScheduleAuto: '',
KernelScheduleType.Multistage: '_cpasync',
KernelScheduleType.CpAsyncWarpSpecialized: '_cpasync_warpspecialized',
KernelScheduleType.CpAsyncWarpSpecializedPingpong: '_cpasync_warpspecialized_pingpong',
KernelScheduleType.CpAsyncWarpSpecializedCooperative: '_cpasync_warpspecialized_cooperative',
KernelScheduleType.Tma: '_unspecialized',
KernelScheduleType.TmaWarpSpecialized: '_warpspecialized',
KernelScheduleType.TmaWarpSpecializedPingpong: '_warpspecialized_pingpong',
KernelScheduleType.TmaWarpSpecializedCooperative: '_warpspecialized_cooperative',
KernelScheduleType.TmaWarpSpecializedFP8FastAccum: '_warpspecialized_fp8_fastaccum',
KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: '_warpspecialized_cooperative_fp8_fastaccum',
KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: '_warpspecialized_pingpong_fp8_fastaccum',
KernelScheduleType.ImplicitTmaWarpSpecializedSm90: '_warpspecialized',
}
class EpilogueScheduleType(enum.Enum):
ScheduleAuto = enum_auto()
EpilogueTransposed = enum_auto()
NoSmemWarpSpecialized = enum_auto()
TmaWarpSpecialized = enum_auto()
TmaWarpSpecializedCooperative = enum_auto()
#
EpilogueScheduleTag = {
EpilogueScheduleType.ScheduleAuto: 'cutlass::epilogue::collective::EpilogueScheduleAuto',
EpilogueScheduleType.EpilogueTransposed: 'cutlass::gemm::EpilogueTransposed',
EpilogueScheduleType.NoSmemWarpSpecialized: 'cutlass::epilogue::NoSmemWarpSpecialized',
EpilogueScheduleType.TmaWarpSpecialized: 'cutlass::epilogue::TmaWarpSpecialized',
EpilogueScheduleType.TmaWarpSpecializedCooperative: 'cutlass::epilogue::TmaWarpSpecializedCooperative',
}
#
EpilogueScheduleSuffixes = {
EpilogueScheduleType.ScheduleAuto: '',
EpilogueScheduleType.EpilogueTransposed: '',
EpilogueScheduleType.NoSmemWarpSpecialized: '_epi_nosmem',
EpilogueScheduleType.TmaWarpSpecialized: '_epi_tma',
EpilogueScheduleType.TmaWarpSpecializedCooperative: '_epi_tma',
}
class EpilogueFunctor3x(enum.Enum):
LinearCombination = enum_auto()
#
EpilogueFunctor3xTag = {
EpilogueFunctor3x.LinearCombination: 'cutlass::epilogue::fusion::LinearCombination',
}
class TileSchedulerType(enum.Enum):
Default = enum_auto()
Persistent = enum_auto()
StreamK = enum_auto()
#
TileSchedulerTag = {
TileSchedulerType.Default: 'void',
TileSchedulerType.Persistent: 'cutlass::gemm::PersistentScheduler',
TileSchedulerType.StreamK: 'cutlass::gemm::StreamKScheduler',
}
#
TileSchedulerSuffixes = {
TileSchedulerType.Default: '',
TileSchedulerType.Persistent: '',
TileSchedulerType.StreamK: '_stream_k',
}
###################################################################################################
#
class SideMode(enum.Enum):
Left = enum_auto()
Right = enum_auto()
#
SideModeTag = {
SideMode.Left: 'cutlass::SideMode::kLeft',
SideMode.Right: 'cutlass::SideMode::kRight'
}
#
ShortSideModeNames = {
SideMode.Left: 'ls',
SideMode.Right: 'rs'
}
###################################################################################################
#
class FillMode(enum.Enum):
Lower = enum_auto()
Upper = enum_auto()
#
FillModeTag = {
FillMode.Lower: 'cutlass::FillMode::kLower',
FillMode.Upper: 'cutlass::FillMode::kUpper'
}
#
ShortFillModeNames = {
FillMode.Lower: 'l',
FillMode.Upper: 'u'
}
###################################################################################################
#
class DiagType(enum.Enum):
NonUnit = enum_auto()
Unit = enum_auto()
#
DiagTypeTag = {
DiagType.NonUnit: 'cutlass::DiagType::kNonUnit',
DiagType.Unit: 'cutlass::DiagType::kUnit'
}
#
ShortDiagTypeNames = {
DiagType.NonUnit: 'nu',
DiagType.Unit: 'un'
}
###################################################################################################
#
class OpcodeClass(enum.Enum):
Simt = enum_auto()
TensorOp = enum_auto()
WmmaTensorOp = enum_auto()
SparseTensorOp = enum_auto()
OpcodeClassNames = {
OpcodeClass.Simt: 'simt',
OpcodeClass.TensorOp: 'tensorop',
OpcodeClass.WmmaTensorOp: 'wmma_tensorop',
}
OpcodeClassTag = {
OpcodeClass.Simt: 'cutlass::arch::OpClassSimt',
OpcodeClass.TensorOp: 'cutlass::arch::OpClassTensorOp',
OpcodeClass.WmmaTensorOp: 'cutlass::arch::OpClassWmmaTensorOp',
}
###################################################################################################
#
class OperationKind(enum.Enum):
Gemm = enum_auto()
RankK = enum_auto()
Rank2K = enum_auto()
Trmm = enum_auto()
Symm = enum_auto()
Conv2d = enum_auto()
Conv3d = enum_auto()
#
OperationKindNames = {
OperationKind.Gemm: 'gemm'
, OperationKind.RankK: 'rank_k'
, OperationKind.Rank2K: 'rank_2k'
, OperationKind.Trmm: 'trmm'
, OperationKind.Symm: 'symm'
, OperationKind.Conv2d: 'conv2d'
, OperationKind.Conv3d: 'conv3d'
}
#
class Target(enum.Enum):
library = enum_auto()
#
ArchitectureNames = {
50: 'maxwell',
60: 'pascal',
61: 'pascal',
70: 'volta',
75: 'turing',
80: 'ampere',
89: 'ada',
90: 'hopper'
}
#
SharedMemPerCC = {
70: 96, # 96KB of SMEM
72: 96, # 96KB of SMEM
75: 64, # 64KB of SMEM
80: 163, # 163KB of SMEM - 1KB reserved for the driver
86: 99, # 99KB of SMEM - 1KB reserved for the driver
87: 163, # 163KB of SMEM - 1KB reserved for the driver
89: 99, # 99KB of SMEM - 1KB reserved for the driver
90: 227, # 227KB of SMEM - 1KB reserved for the driver
}
###################################################################################################
#
def SubstituteTemplate(template, values):
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
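#
# Example (illustrative only): SubstituteTemplate repeatedly replaces ${key}
# placeholders until the text stops changing. The template and values below are
# hypothetical and not taken from the library's emitters.
def _substitute_template_example():
  template = "cutlass_${opcode_class}_${extended_name}_${layout}_align${alignment}"
  values = {
    'opcode_class': 'tensorop',
    'extended_name': 'f16_s16816gemm_f16',
    'layout': 'tt',
    'alignment': '8',
  }
  return SubstituteTemplate(template, values)
  # -> 'cutlass_tensorop_f16_s16816gemm_f16_tt_align8'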
###################################################################################################
#
class GemmKind(enum.Enum):
Gemm = enum_auto()
Sparse = enum_auto()
Universal = enum_auto()
Universal3x = enum_auto()
SparseUniversal3x = enum_auto()
PlanarComplex = enum_auto()
PlanarComplexArray = enum_auto()
Grouped = enum_auto()
#
GemmKindNames = {
GemmKind.Gemm: "gemm",
GemmKind.Sparse: "spgemm",
GemmKind.Universal: "gemm",
GemmKind.Universal3x: "gemm",
GemmKind.SparseUniversal3x: "spgemm",
GemmKind.PlanarComplex: "gemm_planar_complex",
GemmKind.PlanarComplexArray: "gemm_planar_complex_array",
GemmKind.Grouped: "gemm_grouped",
}
#
class RankKKind(enum.Enum):
Universal = enum_auto()
#
RankKKindNames = {
RankKKind.Universal: "rank_k"
}
#
class TrmmKind(enum.Enum):
Universal = enum_auto()
#
TrmmKindNames = {
TrmmKind.Universal: "trmm"
}
#
class SymmKind(enum.Enum):
Universal = enum_auto()
#
SymmKindNames = {
SymmKind.Universal: "symm"
}
#
class EpilogueFunctor(enum.Enum):
LinearCombination = enum_auto()
LinearCombinationClamp = enum_auto()
#
EpilogueFunctorTag = {
EpilogueFunctor.LinearCombination: 'cutlass::epilogue::thread::LinearCombination',
EpilogueFunctor.LinearCombinationClamp: 'cutlass::epilogue::thread::LinearCombinationClamp',
}
#
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Horizontal = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
StridedDgradHorizontal = enum_auto()
StreamK = enum_auto()
#
SwizzlingFunctorTag = {
SwizzlingFunctor.Identity1: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.Identity2: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>',
SwizzlingFunctor.Identity4: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.Identity8: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>',
SwizzlingFunctor.Horizontal: 'cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle',
SwizzlingFunctor.StridedDgradIdentity1: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.StridedDgradIdentity4: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.StridedDgradHorizontal: 'cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle',
SwizzlingFunctor.StreamK: 'cutlass::gemm::threadblock::ThreadblockSwizzleStreamK',
}
#
class GroupScheduleMode(enum.Enum):
  Device = enum_auto()
Host = enum_auto()
#
GroupScheduleModeTag = {
GroupScheduleMode.Device: 'cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly',
GroupScheduleMode.Host: 'cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute'
}
#
ShortGroupScheduleModeNames = {
GroupScheduleMode.Device: 'Device',
GroupScheduleMode.Host: 'Host'
}
###################################################################################################
#
class ConvKind(enum.IntEnum):
Fprop = 0
Dgrad = 1
Wgrad = 2
#
ConvKindTag = {
ConvKind.Fprop: 'cutlass::conv::Operator::kFprop',
ConvKind.Dgrad: 'cutlass::conv::Operator::kDgrad',
ConvKind.Wgrad: 'cutlass::conv::Operator::kWgrad'
}
ConvKindNames = {
ConvKind.Fprop: 'fprop',
ConvKind.Dgrad: 'dgrad',
ConvKind.Wgrad: 'wgrad',
}
class ConvMode(enum.IntEnum):
CrossCorrelation = 0
Convolution = 1
#
class IteratorAlgorithm(enum.Enum):
Analytic = 0
Optimized = 1
FixedChannels = 2
FewChannels = 3
FixedStrideDilation = 4
#
IteratorAlgorithmTag = {
IteratorAlgorithm.Analytic: 'cutlass::conv::IteratorAlgorithm::kAnalytic',
IteratorAlgorithm.Optimized: 'cutlass::conv::IteratorAlgorithm::kOptimized',
IteratorAlgorithm.FixedChannels: 'cutlass::conv::IteratorAlgorithm::kFixedChannels',
IteratorAlgorithm.FewChannels: 'cutlass::conv::IteratorAlgorithm::kFewChannels',
IteratorAlgorithm.FixedStrideDilation: 'cutlass::conv::IteratorAlgorithm::kFixedStrideDilation'
}
IteratorAlgorithmNames = {
IteratorAlgorithm.Analytic: 'analytic',
IteratorAlgorithm.Optimized: 'optimized',
IteratorAlgorithm.FixedChannels: 'fixed_channels',
IteratorAlgorithm.FewChannels: 'few_channels',
IteratorAlgorithm.FixedStrideDilation: 'fixed_stride_dilation'
}
#
class StrideSupport(enum.Enum):
Strided = 0
Unity = 1
Fixed = 2
#
StrideSupportTag = {
StrideSupport.Strided: 'cutlass::conv::StrideSupport::kStrided',
StrideSupport.Unity: 'cutlass::conv::StrideSupport::kUnity',
StrideSupport.Fixed: 'cutlass::conv::StrideSupport::kFixed'
}
StrideSupportNames = {
StrideSupport.Strided: '',
StrideSupport.Unity: 'unity_stride',
StrideSupport.Fixed: 'fixed_stride'
}
#
class GroupMode(enum.Enum):
NoneGroup = enum_auto() # dense conv (G=1)
SingleGroup = enum_auto() # grouped convolution (single group per CTA)
  MultipleGroup = enum_auto() # grouped convolution (multiple groups per CTA)
  Depthwise = enum_auto()     # depthwise convolution (C=K=G)
#
GroupModeTag = {
GroupMode.NoneGroup: 'cutlass::conv::GroupMode::kNone',
GroupMode.SingleGroup: 'cutlass::conv::GroupMode::kSingleGroup',
GroupMode.MultipleGroup: 'cutlass::conv::GroupMode::kMultipleGroup',
GroupMode.Depthwise: 'cutlass::conv::GroupMode::kDepthwise',
}
GroupModeNames = {
GroupMode.NoneGroup: '',
GroupMode.SingleGroup: 'single_group',
GroupMode.MultipleGroup: 'multiple_group',
GroupMode.Depthwise: 'depthwise',
}
###################################################################################################
#
class MathInstruction:
def __init__(self,
instruction_shape, \
element_a, element_b, element_accumulator, \
opcode_class, math_operation = MathOperation.multiply_add \
):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
#
class TileDescription:
def __init__(self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute, cluster_shape = [1,1,1]):
self.threadblock_shape = threadblock_shape
self.tile_shape = threadblock_shape
self.stages = stages
self.warp_count = warp_count
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
self.cluster_shape = cluster_shape
def procedural_name(self):
if self.minimum_compute_capability >= 90:
return "{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{s}".format(
tbm = self.threadblock_shape[0],
tbn = self.threadblock_shape[1],
tbk = self.threadblock_shape[2],
cm = self.cluster_shape[0],
cn = self.cluster_shape[1],
ck = self.cluster_shape[2],
s = self.stages)
else:
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
#
class Direct2dConvFixedStrideDilationTileDescription:
def __init__(self, threadblock_output_shape, filter_shape, stages, stride, dilation, warp_count, math_instruction, min_compute, max_compute):
self.threadblock_shape = [threadblock_output_shape[0]*threadblock_output_shape[1]*threadblock_output_shape[2], threadblock_output_shape[3], filter_shape[0]*filter_shape[1]]
self.threadblock_output_shape = threadblock_output_shape
self.filter_shape = filter_shape
self.stages = stages
self.warp_count = warp_count
self.stride = stride
self.dilation = dilation
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
str_name = "%dx%dx%d_%dx%dx%dx%d_%d_filter%dx%d" % (self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
self.threadblock_output_shape[0],
self.threadblock_output_shape[1],
self.threadblock_output_shape[2],
self.threadblock_output_shape[3],
self.stages,
self.filter_shape[0],
self.filter_shape[1])
# Fixed Strided and dilation
if self.stride != [-1, -1] and self.dilation != [-1, -1]:
str_name += "_stride%dx%d_dilation%dx%d" % (self.stride[0],
self.stride[1],
self.dilation[0],
self.dilation[1])
return str_name
#
class TensorDescription:
def __init__(self, element, layout, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.alignment = alignment
self.complex_transform = complex_transform
#
class SymmetricTensorDescription:
def __init__(self, element, layout, fill_mode, alignment = 1, complex_transform = ComplexTransform.none, side_mode = SideMode.Left):
self.element = element
self.layout = layout
self.fill_mode = fill_mode
self.alignment = alignment
self.complex_transform = complex_transform
self.side_mode = side_mode
#
class TriangularTensorDescription:
def __init__(self, element, layout, side_mode, fill_mode, diag_type, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.side_mode = side_mode
self.fill_mode = fill_mode
self.diag_type = diag_type
self.alignment = alignment
self.complex_transform = complex_transform
#
def CalculateSmemUsage(operation):
cta_shape = operation.tile_description.threadblock_shape
stages = operation.tile_description.stages
if operation.operation_kind == OperationKind.Gemm and operation.gemm_kind == GemmKind.Sparse:
# Elements represented by 8 bits of metadata (based on 4:8, 2:4 or 1:2 sparsity)
if DataTypeSize[operation.A.element] == 32:
elements_per_8b_md = 2
elif DataTypeSize[operation.A.element] == 4:
elements_per_8b_md = 8
else:
elements_per_8b_md = 4
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * (cta_shape[2] // 2) // 8 + \
DataTypeSize[operation.B.element] * cta_shape[1] * cta_shape[2] // 8 + \
cta_shape[0] * (cta_shape[2] // 2) // elements_per_8b_md
else:
    # Some BLAS3 operations only have an A tensor; default B's size to A's and override it below for mixed-input kernels
data_type_size_a = DataTypeSize[operation.A.element]
data_type_size_b = DataTypeSize[operation.A.element]
if operation.is_mixed_input():
data_type_size_b = DataTypeSize[operation.B.element]
smem_per_stage = data_type_size_a * cta_shape[0] * cta_shape[2] // 8 + \
data_type_size_b * cta_shape[1] * cta_shape[2] // 8
smem_usage = smem_per_stage * stages
return (smem_usage >> 10)
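#
# Example (illustrative only): the dense branch of CalculateSmemUsage reproduced
# standalone for a hypothetical 128x256x64 f16 tile with 3 stages, since building
# a full operation object requires definitions from other modules.
def _smem_usage_example_kb():
  cta_shape = [128, 256, 64]
  stages = 3
  bits_a = DataTypeSize[DataType.f16]
  bits_b = DataTypeSize[DataType.f16]
  smem_per_stage = bits_a * cta_shape[0] * cta_shape[2] // 8 + \
                   bits_b * cta_shape[1] * cta_shape[2] // 8
  return (smem_per_stage * stages) >> 10   # -> 144 (KiB)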
class GemmUniversalMode(enum.IntEnum):
"""
Types corresponding to GemmUniversalMode
"""
Gemm = 0
GemmSplitKParallel = 1
Batched = 2
Array = 3
class SplitKMode(enum.IntEnum):
"""
Types corresponding to SplitKMode
"""
NoneSplitK = 0
Serial = 1
Parallel = 2
| python/cutlass_library/library.py/0 | {
"file_path": "python/cutlass_library/library.py",
"repo_id": "python",
"token_count": 13365
} | 43 |
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-copy" width="44" height="44" viewBox="0 0 24 24" stroke-width="1.5" stroke="#000000" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"/>
<rect x="8" y="8" width="12" height="12" rx="2" />
<path d="M16 8v-2a2 2 0 0 0 -2 -2h-8a2 2 0 0 0 -2 2v8a2 2 0 0 0 2 2h2" />
</svg>
| python/docs/_static/copy-button.svg/0 | {
"file_path": "python/docs/_static/copy-button.svg",
"repo_id": "python",
"token_count": 185
} | 44 |
Examples
==================
.. toctree::
:maxdepth: 5
Basic GEMM <externals/00_basic_gemm.nblink>
Epilogue <externals/01_epilogue.nblink>
PyTorch Extension <externals/02_pytorch_extension_grouped_gemm.nblink>
| python/docs_src/source/examples.rst/0 | {
"file_path": "python/docs_src/source/examples.rst",
"repo_id": "python",
"token_count": 95
} | 45 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Testbed for running device-level Conv2Ds with absolute maximum calculation and scaling
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "conv2d_problems.h"
#include "../../common/cutlass_unit_test.h"
#include "../../gemm/device/testbed_utils.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
namespace test {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Conv,
template<typename T> class ActivationFunctor
>
struct TestbedConv2dWithAbsMax {
using ElementAccumulator = typename Conv::ElementAccumulator;
using ElementCompute = typename Conv::UnderlyingKernel::Epilogue::OutputOp::ElementCompute;
using ElementScalingFactor = typename Conv::EpilogueOutputOp::ElementScalingFactor;
using ElementAbsmax = typename Conv::EpilogueOutputOp::ElementAbsmax;
static cutlass::conv::Operator const kConvolutionalOperator = Conv::kConvolutionalOperator;
static bool const kScaleAux = Conv::EpilogueOutputOp::kIsScalingAndAmaxAuxOutputNeeded;
static bool const kScaleOutput = Conv::EpilogueOutputOp::kIsScalingAndAmaxOutputNeeded;
bool doScaleA;
bool doScaleB;
bool doScaleC;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Conv::ElementA, typename Conv::LayoutA> tensor_A;
cutlass::HostTensor<typename Conv::ElementB, typename Conv::LayoutB> tensor_B;
cutlass::HostTensor<typename Conv::ElementC, typename Conv::LayoutC> tensor_C;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementAuxOutput, typename Conv::LayoutC> tensor_Aux;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementOutput, typename Conv::LayoutC> tensor_D;
cutlass::HostTensor<typename Conv::ElementC, typename Conv::LayoutC> tensor_Vector;
cutlass::HostTensor<ElementAccumulator, typename Conv::LayoutC> tmp_D;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementOutput, typename Conv::LayoutC> reference_D;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementAuxOutput, typename Conv::LayoutC> reference_Aux;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_A;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_B;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_C;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_D;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_Aux;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> abs_max_Aux;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> abs_max_D;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> reference_abs_max_Aux;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> reference_abs_max_D;
//
// Methods
//
TestbedConv2dWithAbsMax(
bool scaleA = true,
bool scaleB = true,
bool scaleC = true,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
doScaleA(scaleA), doScaleB(scaleB), doScaleC(scaleC),
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize scaling factors
template <typename Element, typename Layout>
bool initialize_scale_factor(cutlass::TensorView<Element, Layout> view, uint64_t seed, int bits=0) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, double(1.), double(0.), bits);
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Conv::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::conv::Conv2dProblemSize const &problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Vector.resize({1, 1, 1, implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c()});
reference_D.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size), false);
tmp_D.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
EXPECT_TRUE(initialize_tensor(tensor_Vector.host_view(), init_C, seed + 2020));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
cutlass::Coord<4> origin(0);
tensor_A.host_view().at(origin) = typename Conv::ElementA(1);
tensor_B.host_view().at(origin) = typename Conv::ElementB(1);
tensor_C.host_view().at(origin) = typename Conv::ElementC(1);
tensor_Vector.host_view().at(origin) = typename Conv::ElementC(1);
cutlass::reference::host::TensorFill(tensor_D.host_view());
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
tensor_Vector.sync_device();
int scale_bits = 2;
if (doScaleA) {
scale_A.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_A.host_view(), seed + 2021, scale_bits));
scale_A.sync_device();
}
if (doScaleB) {
scale_B.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_B.host_view(), seed + 2022, scale_bits));
scale_B.sync_device();
}
if (doScaleC) {
scale_C.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_C.host_view(), seed + 2023, scale_bits));
scale_C.sync_device();
}
if (kScaleOutput) {
scale_D.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_D.host_view(), seed + 2024, scale_bits));
scale_D.sync_device();
abs_max_D.resize({1, 1, 1, 1});
cutlass::reference::host::TensorFill(abs_max_D.host_view());
abs_max_D.sync_device();
reference_abs_max_D.resize({1, 1, 1, 1});
}
if (kScaleAux) {
tensor_Aux.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
cutlass::reference::host::TensorFill(tensor_Aux.host_view());
tensor_Aux.sync_device();
scale_Aux.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_Aux.host_view(), seed + 2025, scale_bits));
scale_Aux.sync_device();
abs_max_Aux.resize({1, 1, 1, 1});
cutlass::reference::host::TensorFill(abs_max_Aux.host_view());
abs_max_Aux.sync_device();
reference_Aux.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size), false);
reference_abs_max_Aux.resize({1, 1, 1, 1});
}
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::conv::Conv2dProblemSize const &problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
if (kScaleAux) {
tensor_Aux.sync_host();
abs_max_Aux.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(abs_max_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_Aux.host_view()), 0);
passed &= cutlass::reference::host::TensorEquals(reference_Aux.host_view(), tensor_Aux.host_view());
passed &= cutlass::reference::host::TensorEquals(abs_max_Aux.host_view(), reference_abs_max_Aux.host_view());
}
if (kScaleOutput) {
abs_max_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(abs_max_D.host_view()), 0);
passed &= cutlass::reference::host::TensorEquals(abs_max_D.host_view(), reference_abs_max_D.host_view());
}
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
std::ofstream file0("conv_testbed_with_amax_errors_reference.txt");
std::ofstream file1("conv_testbed_with_amax_errors_computed.txt");
std::ofstream file("conv_testbed_with_amax_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nVector =\n" << tensor_Vector.host_view()
<< "\nScaleA = " << scale_A.host_view()
<< "\nScaleB = " << scale_B.host_view()
<< "\nScaleC = " << scale_C.host_view()
<< "\nScaleD = " << scale_D.host_view()
<< "\nScaleAux = " << scale_Aux.host_view()
<< std::endl;
file0 << "\n\nReference D =\n" << reference_D.host_view() << std::endl;
file1 << "\n\nComputed D =\n" << tensor_D.host_view() << std::endl;
if (kScaleAux) {
file0 << "\n\nReference Aux =\n" << reference_Aux.host_view() << std::endl;
file1 << "\n\nComputed Aux =\n" << tensor_Aux.host_view() << std::endl;
file0 << "\n\nReference Absmax Aux = " << reference_abs_max_Aux.host_view() << std::endl;
file1 << "\n\nComputed Absmax Aux = " << abs_max_Aux.host_view() << std::endl;
}
if (kScaleOutput) {
file0 << "\n\nReference Absmax D = " << reference_abs_max_D.host_view() << std::endl;
file1 << "\n\nComputed Absmax D = " << abs_max_D.host_view() << std::endl;
}
}
return passed;
}
  /// Verifies the computed result against a host reference implementation
bool verify(
cutlass::conv::Conv2dProblemSize const &problem_size,
ElementCompute alpha,
ElementCompute beta) {
cutlass::Coord<4> origin(0);
ElementCompute scaled_alpha = alpha;
if (doScaleA) {
scaled_alpha *= scale_A.host_view().at(origin);
}
if (doScaleB) {
scaled_alpha *= scale_B.host_view().at(origin);
}
ElementCompute scaled_beta = beta;
if (doScaleC) {
scaled_beta *= scale_C.host_view().at(origin);
}
//
// Verify
//
cutlass::reference::host::Conv2d<
typename Conv::ElementA, typename Conv::LayoutA,
typename Conv::ElementB, typename Conv::LayoutB,
typename Conv::ElementC, typename Conv::LayoutC,
ElementCompute, ElementAccumulator, ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tmp_D.host_ref(),
scaled_alpha,
scaled_beta
);
ElementCompute tmp_abs_max_Aux(0.);
ElementCompute tmp_abs_max_D(0.);
cutlass::NumericConverter<ElementCompute, typename Conv::ElementC> cvt_c_to_compute;
cutlass::NumericConverter<ElementCompute, ElementAccumulator> cvt_accum_to_compute;
cutlass::NumericConverter<ElementAbsmax, ElementCompute> cvt_compute_to_absmax;
cutlass::NumericConverter<typename Conv::EpilogueOutputOp::ElementOutput, ElementCompute> cvt_compute_to_d;
cutlass::NumericConverter<typename Conv::EpilogueOutputOp::ElementAuxOutput, ElementCompute> cvt_compute_to_aux;
cutlass::absolute_value_op<ElementCompute> abs;
cutlass::maximum_with_nan_propogation<ElementCompute> max;
ActivationFunctor<ElementCompute> act;
ElementScalingFactor d_scale = kScaleOutput ? scale_D.host_view().at(origin) : ElementScalingFactor(1.);
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
ElementCompute intermediate = cvt_accum_to_compute(tmp_D.host_view().at({n, p, q, k}));
ElementCompute bias = cvt_c_to_compute(tensor_Vector.host_view().at({0, 0, 0, k}));
ElementCompute aux = intermediate + bias;
ElementCompute d = act(aux);
tmp_abs_max_Aux = max(abs(aux), tmp_abs_max_Aux);
tmp_abs_max_D = max(abs(d), tmp_abs_max_D);
reference_D.host_view().at({n, p, q, k}) = cvt_compute_to_d(d * d_scale);
if (kScaleAux) {
reference_Aux.host_view().at({n, p, q, k}) = cvt_compute_to_aux(aux * scale_Aux.host_view().at(origin));
}
}
}
}
}
if (kScaleAux) {
reference_abs_max_Aux.host_view().at(origin) = cvt_compute_to_absmax(tmp_abs_max_Aux);
}
if (kScaleOutput) {
reference_abs_max_D.host_view().at(origin) = cvt_compute_to_absmax(tmp_abs_max_D);
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
    // Initialize the Conv2d operator
//
typename Conv::EpilogueOutputOp::Params::ActivationParams activation_params{alpha, beta};
typename Conv::EpilogueOutputOp::Params epilogue_params{
activation_params,
scale_A.device_data(),
scale_B.device_data(),
scale_C.device_data(),
scale_D.device_data(),
scale_Aux.device_data(),
abs_max_Aux.device_data(),
abs_max_D.device_data()
};
typename Conv::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
tensor_Aux.device_ref(),
epilogue_params,
cutlass::conv::SplitKMode::kSerial,
tensor_Vector.device_data(),
0
};
Conv conv2d_op;
cutlass::Status status = conv2d_op.can_implement(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
size_t workspace_size = Conv::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = conv2d_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
    // Run the Conv2d operator
//
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
cudaError_t cuda_error = cudaDeviceSynchronize();
EXPECT_TRUE(cuda_error == cudaSuccess) << cudaGetErrorString(cuda_error);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed" << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ImplicitGemm,
template<typename T> class ActivationFunctor = cutlass::epilogue::thread::Identity
>
bool TestAllConv2dWithAbsmax(bool scaleA=true, bool scaleB=true, bool scaleC=true) {
const Conv2dProblemVector &conv_test_sizes = Conv2dProblemVector();
const Conv2dProblemVector &conv_blacklist_sizes = Conv2dProblemVector();
//
// Testbed object
//
TestbedConv2dWithAbsMax<ImplicitGemm, ActivationFunctor> testbed(scaleA, scaleB, scaleC);
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
bool passed = true;
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
// Prune all problems with channels that aren't divisible by the number of elements accessed per
// load for operands A and B. This is meant to align with the requirements of iterators used for
// fprop kernels.
ChannelDivisibilitySpecification channel_spec(128 / cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
auto pruned_problem_vector = prune(*problem_vector, channel_spec);
// Run conv testbed on default convolution sizes
for(auto conv_problem : pruned_problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed &= testbed.run(conv_problem);
if (!passed) {
return false;
}
// test mode = convolution
passed &= testbed.run(conv_problem.reset_mode(cutlass::conv::Mode::kConvolution));
if (!passed) {
return false;
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device/conv2d_with_absmax_testbed.h/0 | {
"file_path": "test/unit/conv/device/conv2d_with_absmax_testbed.h",
"repo_id": "test",
"token_count": 8980
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "tiled_cp_async_testbed.hpp"
using namespace cute;
TEST(SM80_CuTe_tiled_cp_async, no_swizzle_mn_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_no_swizzle<double, cute::Int<64>, cute::Int<16>, gmem_stride_type, smem_layout_atom, tiled_copy>();
}
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_no_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, smem_layout_atom, tiled_copy>();
}
}
TEST(SM80_CuTe_tiled_cp_async, no_swizzle_k_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _8,_1>>{});
using val_layout = decltype(Layout<Shape<_1,_2>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using smem_layout_atom = decltype(make_ordered_layout(Shape<_128,_16>{}, Step <_2, _1>{}));
using gmem_stride_type = decltype(LayoutRight{});
test_cp_async_no_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, smem_layout_atom, tiled_copy>();
}
}
TEST(SM80_CuTe_tiled_cp_async, swizzle_mn_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<2,2,2>{});
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_with_swizzle<double, cute::Int<64>, cute::Int<16>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{});
using thr_layout = decltype(Layout<Shape <_16, _8>, Stride< _1,_16>>{});
using val_layout = decltype(Layout<Shape<_2,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<2,2,2>{});
using smem_layout_atom = decltype(Layout<Shape <_16, _4>, Stride< _1,_16>>{});
using gmem_stride_type = decltype(LayoutLeft{});
test_cp_async_with_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
}
TEST(SM80_CuTe_tiled_cp_async, swizzle_k_single_tile)
{
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{});
using thr_layout = decltype(Layout<Shape < _8,_16>, Stride<_16, _1>>{});
using val_layout = decltype(Layout<Shape<_1,_1>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<2,0,4>{});
using smem_layout_atom = decltype(Layout<Shape <_4,_16>, Stride<_1, _4>>{});
using gmem_stride_type = decltype(LayoutRight{});
test_cp_async_with_swizzle<double, cute::Int<128>, cute::Int<16>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
{
using copy_atom = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{});
using thr_layout = decltype(Layout<Shape <_16,_8>, Stride< _8,_1>>{});
using val_layout = decltype(Layout<Shape < _1,_4>>{});
using tiled_copy = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
using swizzle_atom = decltype(Swizzle<3,2,3>{});
using smem_layout_atom = decltype(Layout<Shape < _8,_32>, Stride<_32, _1>>{});
using gmem_stride_type = decltype(LayoutRight{});
test_cp_async_with_swizzle<tfloat32_t, cute::Int<128>, cute::Int<32>, gmem_stride_type, swizzle_atom, smem_layout_atom, tiled_copy>();
}
}
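// The cases above all follow the same recipe: a cp.async Copy_Atom is tiled over a
// 128-thread layout, paired with an SMEM layout atom (optionally composed with a
// Swizzle), and the helper copies a single (BLK_M, BLK_K) tile GMEM -> SMEM -> GMEM
// and checks the round trip.
//
// A further configuration could be added in the same style. Illustrative sketch only:
// this float variant is an assumption, not part of the original suite, and has not
// been validated on hardware.
//
//   using copy_atom        = decltype(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, float>{});
//   using thr_layout       = decltype(Layout<Shape<_16,_8>, Stride<_8,_1>>{});
//   using val_layout       = decltype(Layout<Shape<_1,_4>>{});
//   using tiled_copy       = decltype(make_tiled_copy(copy_atom{}, thr_layout{}, val_layout{}));
//   using smem_layout_atom = decltype(make_ordered_layout(Shape<_128,_32>{}, Step<_2,_1>{}));
//   using gmem_stride_type = decltype(LayoutRight{});
//   test_cp_async_no_swizzle<float, cute::Int<128>, cute::Int<32>, gmem_stride_type, smem_layout_atom, tiled_copy>();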
| test/unit/cute/ampere/tiled_cp_async.cu/0 | {
"file_path": "test/unit/cute/ampere/tiled_cp_async.cu",
"repo_id": "test",
"token_count": 2474
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass_unit_test.h"
#include <iostream>
#include <cstdint>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/cluster_launch.hpp>
namespace cutlass::test {
template <class ElementType, class SmemLayout>
struct SharedStorage
{
cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem;
alignas(16) cute::uint64_t tma_load_mbar[1];
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class GmemLayout, class SmemLayout,
class CopyAtom, class CTA_Tiler, class Cluster_Size>
__global__ void
tma_test_device_cute(T const* g_in, T* g_out, GmemLayout gmem_layout, SmemLayout smem_layout,
CUTE_GRID_CONSTANT CopyAtom const tma, CTA_Tiler cta_tiler, Cluster_Size cluster_size)
{
using namespace cute;
CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout)));
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<T, SmemLayout>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
// Construct SMEM tensor
Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...)
  // Shared memory barriers use 64 bits in SMEM for synchronization
uint64_t* tma_load_mbar = shared_storage.tma_load_mbar;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA = tma.get_tma_tensor(shape(gmem_layout));
Tensor mB = make_tensor(make_gmem_ptr<T>(g_out), gmem_layout);
Tensor gA = zipped_divide(mA, cta_tiler); // ((CTA_TILE_M,CTA_TILE_N,...),(REST_M,REST_N,...))
Tensor gB = zipped_divide(mB, cta_tiler); // ((CTA_TILE_M,CTA_TILE_N,...),(REST_M,REST_N,...))
#if 1
if (thread0()) {
print(tma);
print("TILE : "); print(cta_tiler); print("\n");
print(" mA : "); print( mA); print("\n");
print(" mB : "); print( mB); print("\n");
print(" gA : "); print( gA); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sA : "); print( sA); print("\n");
} __syncthreads(); cute::cluster_sync();
#endif
//
// Prepare the TMA_LOAD
//
Tensor sA_x = make_tensor(sA.data(), make_layout(sA.layout(), Layout<_1>{})); // ((CTA_TILE_M,CTA_TILE_N,...),_1)
Tensor tBgB = gB; // ((CTA_TILE_M,CTA_TILE_N,...),(REST_M,REST_N,...))
int cta_rank_in_cluster = cute::block_rank_in_cluster();
auto [tAgA, tAsA] = tma_partition(tma, cta_rank_in_cluster, make_layout(cluster_size), sA_x, gA);
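  // tma_partition splits the copy between the CTAs participating in the multicast:
  // tAgA is this CTA's slice of the GMEM tiles and tAsA the matching SMEM destination fragment.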
#if 1
if (thread0()) {
print("sA_x : "); print(sA_x); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
} __syncthreads(); cute::cluster_sync();
#endif
//
// TMA Multicast Masks -- Get a mask of the active ctas in each TMA
//
int elected_cta_rank = 0;
bool elect_one_cta = (elected_cta_rank == cta_rank_in_cluster);
bool elect_one_thr = cute::elect_one_sync();
uint16_t tma_mcast_mask = ((uint16_t(1) << cluster_size) - 1);
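  // Each bit of the mask enables one CTA of the cluster as a multicast destination;
  // here all cluster_size CTAs participate.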
#if 1
if (thread0()) {
print("tma_mcast_mask : "); print(tma_mcast_mask); print("\n");
} __syncthreads(); cute::cluster_sync();
#endif
//
// Perform the TMA_LOAD
//
if (elect_one_thr) {
// Initialize TMA barrier
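    // The expected arrival count is 1: only the single elected thread arrives
    // and posts the transaction byte count for each issue.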
cute::initialize_barrier(tma_load_mbar[0], /* num_threads */ 1);
}
int tma_phase_bit = 0;
// Ensures all CTAs in the Cluster have initialized
__syncthreads();
cute::cluster_sync();
// Loop over the TMA stages, using smem as our buffer
for (int stage = 0; stage < size<1>(tAgA); ++stage)
{
// Set the bytes transferred in this TMA transaction (may involve multiple issues)
constexpr int kTmaTransactionBytes = sizeof(ArrayEngine<T, size(sA)>);
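    // This must equal the total number of bytes the TMA will deposit into SMEM
    // for this stage; the barrier completes only once they have all arrived.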
if (elect_one_thr)
{
cute::set_barrier_transaction_bytes(tma_load_mbar[0], kTmaTransactionBytes);
copy(tma.with(tma_load_mbar[0], tma_mcast_mask), tAgA(_,stage), tAsA(_,0));
}
__syncthreads();
    // Wait on the shared memory barrier until its phase bit flips away from the current tma_phase_bit value
cute::wait_barrier(tma_load_mbar[0], tma_phase_bit);
tma_phase_bit ^= 1;
//
// Write out trivially smem -> gmem
//
// Subbyte elements could cause race conditions, so be even more conservative
if (elect_one_cta && elect_one_thr) {
copy(sA, tBgB(_,stage));
}
__syncthreads();
cute::cluster_sync();
}
}
template <class T, class TmaType = T, class CopyOp,
class GMEM_Layout, class SMEM_Layout,
class CTA_Tiler, class Cluster_Size>
auto
test_tma_load(CopyOp const& copy_op,
GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tiler const& cta_tiler,
Cluster_Size const& cluster_size)
{
using namespace cute;
// Allocate and initialize host test data
size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8);
thrust::host_vector<uint8_t> h_in(N);
for (size_t i = 0; i < h_in.size(); ++i) {
h_in[i] = uint8_t(i % 13);
}
Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout);
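  // hA_in views the raw byte buffer as elements of type T; recast_ptr also handles
  // sub-byte element types.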
// Allocate and initialize device test data
thrust::device_vector<uint8_t> d_in = h_in;
  thrust::device_vector<uint8_t> d_out(h_in.size(), uint8_t(-1)); // fill with 0xFF so untouched bytes are detectable
// Create TMA for this device Tensor
Tensor gA = make_tensor(make_gmem_ptr<T>(raw_pointer_cast(d_in.data())), gmem_layout);
auto tma = make_tma_atom<TmaType>(copy_op, gA, smem_layout, cta_tiler, cluster_size);
//print(tma);
// Launch
dim3 dimBlock(32);
dim3 dimCluster(size(cluster_size));
dim3 dimGrid = dimCluster;
int smem_size = sizeof(SharedStorage<T, SMEM_Layout>);
void* kernel_ptr = (void*) &tma_test_device_cute<T, GMEM_Layout, SMEM_Layout,
decltype(tma), CTA_Tiler, Cluster_Size>;
cutlass::launch_kernel_on_cluster({dimGrid, dimBlock, dimCluster, smem_size},
kernel_ptr,
reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())),
reinterpret_cast<T *>(raw_pointer_cast(d_out.data())),
gmem_layout,
smem_layout,
tma, cta_tiler, cluster_size);
// Copy results back to host
thrust::host_vector<uint8_t> h_out = d_out;
Tensor hA_out = make_tensor(recast_ptr<T>(h_out.data()), gmem_layout);
  // Validate the results, reporting at most the first 3 mismatches.
int count = 3;
for (int i = 0; i < int(size(hA_out)) && count > 0; ++i) {
EXPECT_EQ(hA_in(i), hA_out(i));
if (hA_in(i) != hA_out(i)) {
--count;
}
}
return tma;
}
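// Example usage from a test translation unit. Illustrative sketch only: the copy op,
// layouts, tiler and cluster size below are assumptions chosen to mirror typical SM90
// tests and are not prescribed by this header.
//
//   using namespace cute;
//   auto gmem_layout = make_layout(make_shape(128, 128), GenRowMajor{});
//   auto smem_layout = make_layout(make_shape(Int<64>{}, Int<64>{}), GenRowMajor{});
//   auto cta_tiler   = product_each(shape(smem_layout));
//   cutlass::test::test_tma_load<float>(SM90_TMA_LOAD_MULTICAST{}, gmem_layout,
//                                       smem_layout, cta_tiler, Int<2>{});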
#endif
} // end namespace cutlass::test
| test/unit/cute/hopper/tma_mcast_load_testbed.hpp/0 | {
"file_path": "test/unit/cute/hopper/tma_mcast_load_testbed.hpp",
"repo_id": "test",
"token_count": 3691
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for grouped GEMM problem visitors
*/
#pragma once
#include <iostream>
#include <numeric>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
#include "cutlass/util/device_memory.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Use a simple problem visitor as the baseline
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct BaselineProblemVisitor : public cutlass::gemm::kernel::BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
using Base = cutlass::gemm::kernel::BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
static int const kThreadCount = ThreadCount;
struct SharedStorage {};
int32_t tile_count_sum;
SharedStorage &shared_storage;
//
// Methods
//
CUTLASS_DEVICE
BaselineProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
shared_storage(shared_storage_)
{
cutlass::gemm::GemmCoord problem = this->problem_size();
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
tile_count_sum = this->tile_count(grid);
}
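  // Walk problems one at a time, accumulating their tile counts until the running
  // sum covers this->tile_idx; returns false once all problems are exhausted.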
CUTLASS_DEVICE
bool next_tile() {
if (this->tile_idx < tile_count_sum) {
return true;
}
do {
++this->problem_idx;
if (this->problem_idx >= this->params.problem_count) {
return false;
}
cutlass::gemm::GemmCoord problem = this->problem_size();
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
this->problem_tile_start = tile_count_sum;
tile_count_sum += this->tile_count(grid);
} while (tile_count_sum <= this->tile_idx);
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
return 0;
}
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ProblemVisitor>
struct ProblemVisitorKernel {
struct SharedStorage {
typename ProblemVisitor::SharedStorage problem_visitor;
};
struct Params {
typename ProblemVisitor::Params problem_visitor_params;
int32_t* visited_problems_ptr;
int32_t* visited_tiles_ptr;
int32_t visits_per_block;
Params():
visited_problems_ptr(nullptr),
visited_tiles_ptr(nullptr),
visits_per_block(0) {}
Params(typename ProblemVisitor::Params problem_visitor_params_,
int32_t* visited_problems_ptr_,
int32_t* visited_tiles_ptr_,
int32_t visits_per_block_):
problem_visitor_params(problem_visitor_params_),
visited_problems_ptr(visited_problems_ptr_),
visited_tiles_ptr(visited_tiles_ptr_),
visits_per_block(visits_per_block_) {}
};
CUTLASS_DEVICE
void operator()(const Params& params, SharedStorage &shared_storage) {
int32_t store_offset = params.visits_per_block * blockIdx.x;
ProblemVisitor problem_visitor(params.problem_visitor_params,
shared_storage.problem_visitor,
blockIdx.x);
while (problem_visitor.next_tile()) {
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
if (threadIdx.x == 0) {
params.visited_problems_ptr[store_offset] = problem_idx;
params.visited_tiles_ptr[store_offset] = threadblock_idx;
++store_offset;
}
problem_visitor.advance(gridDim.x);
}
}
};
template <typename ProblemVisitor>
struct ProblemVisitorRunner {
using BaseKernel = ProblemVisitorKernel<ProblemVisitor>;
using Params = typename BaseKernel::Params;
Params params;
std::vector<cutlass::gemm::GemmCoord> host_problem_sizes;
int32_t problem_count;
int32_t threadblock_count;
int32_t visits_per_block;
cutlass::DeviceAllocation<int32_t> visited_problems;
cutlass::DeviceAllocation<int32_t> visited_tiles;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes;
cutlass::DeviceAllocation<uint8_t> workspace;
std::vector<int32_t> host_visited_problems;
std::vector<int32_t> host_visited_tiles;
ProblemVisitorRunner(const std::vector<cutlass::gemm::GemmCoord>& host_problem_sizes_,
int32_t threadblock_count_):
host_problem_sizes(host_problem_sizes_),
problem_count(int32_t(host_problem_sizes_.size())),
threadblock_count(threadblock_count_) {}
/// Initializes GEMM state from arguments.
cutlass::Status initialize() {
size_t workspace_bytes = ProblemVisitor::get_workspace_size(
host_problem_sizes.data(),
problem_count,
threadblock_count);
workspace.reset(workspace_bytes);
std::vector<uint8_t> host_workspace(workspace_bytes);
int32_t tile_count = ProblemVisitor::group_tile_count(host_problem_sizes.data(), problem_count);
ProblemVisitor::host_precompute(host_problem_sizes.data(), problem_count,
threadblock_count, host_workspace.data());
workspace.copy_from_host(host_workspace.data(), workspace_bytes);
device_problem_sizes.reset(problem_count);
device_problem_sizes.copy_from_host(host_problem_sizes.data(), problem_count);
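    // Ceiling-divide the total tile count across the participating threadblocks.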
visits_per_block = (tile_count - 1 + threadblock_count) / threadblock_count;
int32_t total_visits = visits_per_block * threadblock_count;
visited_problems.reset(total_visits);
visited_tiles.reset(total_visits);
host_visited_problems.resize(total_visits);
host_visited_tiles.resize(total_visits);
cudaError_t result = cudaMemset(visited_problems.get(), -1, sizeof(int32_t) * total_visits);
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
result = cudaMemset(visited_tiles.get(), -1, sizeof(int32_t) * total_visits);
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
typename ProblemVisitor::Params pv_params(device_problem_sizes.get(), problem_count, workspace.get(), tile_count);
params = Params(pv_params, visited_problems.get(), visited_tiles.get(), visits_per_block);
return cutlass::Status::kSuccess;
}
bool verify() {
    // Sort visits by problem index and then by threadblock index within each problem
std::vector<int32_t> indices(host_visited_problems.size());
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&](int32_t i1, int32_t i2) {
if (host_visited_problems[i1] == host_visited_problems[i2]) {
return host_visited_tiles[i1] < host_visited_tiles[i2];
}
return host_visited_problems[i1] < host_visited_problems[i2];
});
int32_t idx = 0;
// Skip any entries that were not visited
while (host_visited_problems[indices[idx]] == -1) {
++idx;
}
// Check that each problem visited has the tiles we expect
for (int32_t problem_idx = 0; problem_idx < problem_count; ++problem_idx) {
auto problem = host_problem_sizes[problem_idx];
ProblemVisitor::possibly_transpose_problem(problem);
int32_t problem_tiles = ProblemVisitor::tile_count(ProblemVisitor::grid_shape(problem));
for (int i = 0; i < problem_tiles; ++i) {
EXPECT_EQ(problem_idx, host_visited_problems[indices[idx]]);
EXPECT_EQ(i, host_visited_tiles[indices[idx]]);
++idx;
}
}
return true;
}
bool run(cudaStream_t stream = nullptr) {
cutlass::Status status = initialize();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Initialization failed" << std::endl;
return false;
}
dim3 grid(threadblock_count, 1, 1);
dim3 block(ProblemVisitor::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
cutlass::Kernel<BaseKernel><<<grid, block, smem_size, stream>>>(params);
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
std::cerr << "grid launch failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "cudaDeviceSynchronize failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
visited_problems.copy_to_host(host_visited_problems.data());
visited_tiles.copy_to_host(host_visited_tiles.data());
return verify();
}
};
template <typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount,
bool Transpose,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode0,
cutlass::gemm::kernel::GroupScheduleMode... Args>
struct TestbedGroupedGemmScheduler {
using PSHelper = cutlass::gemm::kernel::detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transpose>;
using BaselinePV = BaselineProblemVisitor<PSHelper,
ThreadblockShape,
PrefetchTileCount,
ThreadCount>;
//
// Data members
//
uint32_t seed;
int problem_count;
int threadblock_count;
std::vector<cutlass::gemm::GemmCoord> problem_sizes_host;
//
// Methods
//
TestbedGroupedGemmScheduler(uint32_t seed_ = 3080):
seed(seed_) { srand(seed); }
/// Initializes data structures
void initialize(int32_t scale_factor) {
//
// Choose random problem sizes
//
problem_sizes_host.clear();
problem_sizes_host.resize(problem_count);
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem(
scale_factor * (rand() % 64) + 24,
scale_factor * (rand() % 64) + 24,
scale_factor * (rand() % 64) + 24);
problem_sizes_host.at(i) = problem;
}
}
template <cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_>
void compare_visitors(const ProblemVisitorRunner<BaselinePV>& baseline_runner) {
using PV = cutlass::gemm::kernel::GemmGroupedProblemVisitor<
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount,
Transpose>;
ProblemVisitorRunner<PV> runner(problem_sizes_host, threadblock_count);
EXPECT_TRUE(runner.run());
// Check that this problem visitor visits the same problems and tiles as the baseline
EXPECT_EQ(baseline_runner.host_visited_problems, runner.host_visited_problems);
EXPECT_EQ(baseline_runner.host_visited_tiles, runner.host_visited_tiles);
}
template <cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode1_,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode2_,
cutlass::gemm::kernel::GroupScheduleMode... Rest>
void compare_visitors(const ProblemVisitorRunner<BaselinePV>& baseline_runner) {
// Compare the next visitor with the baseline visitor
compare_visitors<GroupScheduleMode1_>(baseline_runner);
// Recurse to compare the next visitors
compare_visitors<GroupScheduleMode2_, Rest...>(baseline_runner);
}
/// Executes the test on all scheduler modes
void run(int problem_count, int threadblock_count, int scale_factor=8) {
this->problem_count = problem_count;
this->threadblock_count = threadblock_count;
// Initialize the problem
initialize(scale_factor);
// Run the baseline visitor to which we will compare all other visitors
ProblemVisitorRunner<BaselinePV> baseline_runner(problem_sizes_host, threadblock_count);
EXPECT_TRUE(baseline_runner.run());
compare_visitors<Args...>(baseline_runner);
}
};
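// Example instantiation. Illustrative sketch only: the threadblock shape,
// prefetch/thread counts and schedule modes below are assumptions chosen to
// mirror typical unit tests.
//
//   using Testbed = TestbedGroupedGemmScheduler<
//       cutlass::gemm::GemmShape<128, 128, 32>,
//       /*PrefetchTileCount=*/128, /*ThreadCount=*/128, /*Transpose=*/false,
//       cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
//       cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>;
//
//   Testbed().run(/*problem_count=*/15, /*threadblock_count=*/64);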
/////////////////////////////////////////////////////////////////////////////////////////////////
} // device
} // gemm
} // test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_grouped_scheduler.h/0 | {
"file_path": "test/unit/gemm/device/testbed_grouped_scheduler.h",
"repo_id": "test",
"token_count": 5660
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "testbed_gemv.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
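// Test naming convention, e.g. 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp32_fp32:
//   - first group : batched problem size M x N x K x batch_count
//   - second group: threadblock shape M x N x K plus the batch tile size
//   - third group : per-thread shape M x N x K plus the batch tile size
//   - layout tag  : layouts of A, B and C ('r' = RowMajor, 'c' = ColumnMajor)
//   - type suffix : input (A/B) element type and output element type; the middle
//                   template argument in each test is the accumulator type
//   - an optional alpha / alpha_beta suffix marks tests that pass non-default
//     epilogue scalars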
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_beta_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size, 4.5f, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcr_alpha_beta_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f));
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
/////////////
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_beta_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, 4.5f, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_crc_alpha_beta_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f));
}
/////////////
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_beta_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, 4.5f, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcc_alpha_beta_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f));
}
| test/unit/gemm/kernel/batched_gemv.cu/0 | {
"file_path": "test/unit/gemm/kernel/batched_gemv.cu",
"repo_id": "test",
"token_count": 27111
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit testbed for kernel-level GEMM
*/
#pragma once
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/core_io.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
namespace test {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_multistage_mma_sparse(cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc,
typename Mma::IteratorE::Params params_E,
typename Mma::IteratorE::TensorRef ref_E) {
  // Shared storage needed by threadblock-scoped matrix multiply-accumulate
// Dynamic shared memory base pointer
extern __shared__ int GemmSharedStorageBase[];
// Declare pointer to dynamic shared memory.
typename Mma::SharedStorage *shared_storage =
reinterpret_cast<typename Mma::SharedStorage *>(GemmSharedStorageBase);
// Compute threadblock location
  cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), 0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k() / Mma::kSparse};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
cutlass::MatrixCoord tb_offset_E{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k() / Mma::kSparse};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
{problem_size.m(), problem_size.k() / Mma::kSparse},
tb_thread_id, tb_offset_A);
typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
typename Mma::IteratorE iterator_E(
params_E, ref_E.data(),
{problem_size.m(),
problem_size.k() / Mma::kSparse / Mma::kElementsPerElementE},
tb_thread_id, tb_offset_E);
int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0);
// Construct thread-scoped matrix multiply
Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x);
typename Mma::FragmentC accum;
accum.clear();
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, iterator_E, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, threadIdx.x);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
iterator_C.store(accum);
}
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename MmaCore_>
struct SparseTestbed {
/// Threadblock-level GEMM implementation
using MmaCore = MmaCore_;
using ThreadblockShape = typename MmaCore::Shape;
using WarpShape = typename MmaCore::WarpShape;
using InstructionShape = typename MmaCore::InstructionShape;
using ElementA = typename MmaCore::ElementA;
using LayoutA = typename MmaCore::LayoutA;
using ElementB = typename MmaCore::ElementB;
using LayoutB = typename MmaCore::LayoutB;
using ElementC = typename MmaCore::ElementC;
using LayoutC = typename MmaCore::LayoutC;
using ElementE = typename MmaCore::ElementE;
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using ThreadMapE = typename MmaCore::IteratorThreadMapE;
using AccessTypeA = cutlass::Array<ElementA, ThreadMapA::kElementsPerAccess>;
using AccessTypeB = cutlass::Array<ElementB, ThreadMapB::kElementsPerAccess>;
using AccessTypeE = cutlass::Array<ElementE, ThreadMapE::kElementsPerAccess>;
static int const Stages = MmaCore::kStages;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
MmaCore::kCacheOpA;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
MmaCore::kCacheOpB;
static cutlass::arch::CacheOperation::Kind const CacheOpE =
MmaCore::kCacheOpE;
static int const Sparse = MmaCore::kSparse;
static int const MetaSizeInBits = MmaCore::kMetaSizeInBits;
static int const MaxID2 = MmaCore::kMaxID2;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE = typename MmaCore::GmemLayoutE;
static int const ElementsPerElementE = MmaCore::kElementsPerElementE;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK / Sparse>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define iterators over tiles from the E operand
using IteratorE =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK /
Sparse /
ElementsPerElementE>,
ElementE, ReorderedLayoutE, 1, ThreadMapE, AccessTypeE>;
// Define the threadblock-scoped pipelined matrix multiply
using Mma = cutlass::gemm::threadblock::SparseMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
CacheOpA, IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC,
LayoutC, IteratorE, typename MmaCore::SmemIteratorE, CacheOpE,
typename MmaCore::MmaPolicy, Stages>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> matrix_A;
cutlass::HostTensor<ElementA, LayoutA> matrix_A_uncompressed;
cutlass::HostTensor<ElementB, LayoutB> matrix_B;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
cutlass::HostTensor<ElementE, LayoutE> matrix_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> matrix_E_reordered;
cutlass::gemm::GemmCoord problem_size;
float alpha, beta;
//
// Methods
//
/// Allocates workspace in device memory
SparseTestbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0))
: problem_size(m, n, k), alpha(alpha_), beta(beta_) {
matrix_A.reset(cutlass::make_Coord(m, k / Sparse));
matrix_A_uncompressed.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
matrix_E.reset(cutlass::make_Coord(m, k / Sparse / ElementsPerElementE));
matrix_E_reordered.reset(
cutlass::make_Coord(m, k / Sparse / ElementsPerElementE));
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
    // Waive the test if the device cannot provide enough dynamic shared memory
    // for the threadblock-scoped mainloop's pipeline stages.
    if (properties.sharedMemPerBlockOptin < sizeof(typename Mma::SharedStorage)) {
      return false;
    }
    return true;
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) {
// Waive the test
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view());
cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
matrix_E.host_view(), seed, MetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(matrix_E.host_view(),
(ElementE)(content));
} else {
return false;
}
cutlass::reorder_meta(matrix_E_reordered.host_ref(), matrix_E.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / Sparse / ElementsPerElementE});
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
matrix_E_reordered.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
typename IteratorE::Params params_E(matrix_E_reordered.layout());
cudaError_t result;
int smem_size = int(sizeof(typename Mma::SharedStorage));
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(
test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>,
cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
if (result != cudaSuccess) {
return true;
}
result = cudaFuncSetAttribute(
test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>,
cudaFuncAttributePreferredSharedMemoryCarveout, 100);
if (result != cudaSuccess) {
return true;
}
}
test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>
<<<grid, block, smem_size, 0>>>(
problem_size, params_A, matrix_A.device_ref(), params_B,
matrix_B.device_ref(), matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0), params_E,
matrix_E_reordered.device_ref());
//
// Check error code
//
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result);
matrix_C_computed.sync_host();
cutlass::uncompress(matrix_A_uncompressed.host_ref(), matrix_A.host_ref(),
matrix_E.host_ref(), problem_size.m(),
problem_size.k());
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC>
reference_gemm;
reference_gemm(problem_size, ElementC(alpha),
matrix_A_uncompressed.host_view(), matrix_B.host_view(),
ElementC(beta), matrix_C_reference.host_view());
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(), matrix_C_reference.host_view());
EXPECT_TRUE(passed);
if (!passed && CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cout
<< __FILE__ << ":" << __LINE__ << " "
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "E:\n" << matrix_E.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_reference.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_computed.host_view()), 0);
return passed;
}
};
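//
// Typical use from a unit test (illustrative sketch; "MmaCore" stands for whatever
// DefaultSparseMmaCore specialization the test instantiates, and the grid/block
// dimensions below are assumptions -- real tests size them from the threadblock
// and warp shapes):
//
//   test::gemm::threadblock::SparseTestbed<MmaCore> testbed(128, 128, 128);
//   dim3 grid(1, 1);
//   dim3 block(32, num_warps, 1);   // num_warps = warps per threadblock
//   EXPECT_TRUE(testbed.run(grid, block));
//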
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h",
"repo_id": "test",
"token_count": 6933
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit testbed for thread-level GEMM instantiated and compiled through NVRTC
*/
#pragma once
#include <iostream>
#include <cstdio>
#include <vector>
#include "cutlass/gemm/thread/mma.h"
#include "../kernel/thread/testbed_kernel.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/trace.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include <cuda.h>
#include <nvrtc.h>
#include "../cutlass/nvrtc/environment.h"
#include <assert.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace nvrtc {
namespace thread {
#define NVRTC_RETURN_IF_ERROR(api) \
do { \
nvrtcResult _result = api; \
if (_result != NVRTC_SUCCESS) { \
CUTLASS_TRACE_HOST("Nvrtc error: " << _result); \
return false; \
} \
} while(0)
inline const char * cuda_source_fmt = R"""(
#include "kernel/thread/contraction.hpp"
using Operator = %s;
extern "C" __global__ void global_entry(__grid_constant__ Operator::Params const params) {
extern __shared__ char smem[];
Operator op;
op(params, smem);
}
)""";
struct TestbedKernel {
static bool compile(std::string const &kernel, std::vector<const char *> const &opts) {
int sz = std::snprintf(nullptr, 0, cuda_source_fmt, kernel.c_str());
std::vector<char> cuda_source(sz + 1);
std::snprintf(&cuda_source[0], cuda_source.size(), cuda_source_fmt, kernel.c_str());
nvrtcProgram program;
NVRTC_RETURN_IF_ERROR(
nvrtcCreateProgram(
&program,
cuda_source.data(),
nullptr,
static_cast<int32_t>(cutlass::nvrtc::kCutlassHeaderCount),
cutlass::nvrtc::kCutlassHeaders,
cutlass::nvrtc::kCutlassHeaderNames)
);
nvrtcResult compile_result =
nvrtcCompileProgram(
program,
static_cast<int32_t>(opts.size()),
opts.data());
size_t log_size;
NVRTC_RETURN_IF_ERROR(
nvrtcGetProgramLogSize(program, &log_size)
);
if (log_size > 1) {
auto log = std::make_unique<char[]>(log_size);
NVRTC_RETURN_IF_ERROR(
nvrtcGetProgramLog(program, log.get())
);
std::cout << log.get() << std::endl;
}
NVRTC_RETURN_IF_ERROR(compile_result);
NVRTC_RETURN_IF_ERROR(
nvrtcDestroyProgram(&program)
);
return true;
}
};
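//
// Example invocation (illustrative sketch; the operator type string and compile
// options are placeholders -- real callers pass a fully-specified contraction
// kernel type and architecture flags appropriate for the target GPU):
//
//   std::vector<const char *> opts = {"--std=c++17", "--gpu-architecture=compute_90a"};
//   bool ok = TestbedKernel::compile("SomeContractionOperator", opts);
//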
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC
>
struct Testbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = cutlass::gemm::thread::Mma<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC
>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed() {
tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK));
tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
static inline bool check_nvrtc_error(nvrtcResult error) {
if (error != NVRTC_SUCCESS) {
std::cerr << "failed to compile ";
return false;
}
return true;
}
/// Runs the test
bool run(std::string const &gemm_traits) {
//
// initialize device memory
//
cutlass::reference::host::BlockFillSequential(
tensor_A.host_data(),
tensor_A.capacity()
);
cutlass::reference::host::BlockFillSequential(
tensor_B.host_data(),
tensor_B.capacity(),
ElementB(1),
ElementB(2)
);
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
#if 0
// launch kernel
cutlass::gemm::kernel::testbed_kernel<Mma><<< dim3(1, 1), dim3(1, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
#else
// Instantiate gemm_kernel
nvrtcResult result_nvrtc;
nvrtcProgram program;
static char const *src =
"#include \"cutlass/gemm/thread/mma.h\"\n"
"#include \"cutlass/gemm/gemm.h\"\n"
"#include \"cutlass/layout/matrix.h\"\n"
"#include \"unit/nvrtc/kernel/thread/testbed_kernel.h\"\n"
;
std::string type_name;
#if 0
// TODO Ideally we'd use nvrtcGetTypeName to determine the type, but it cannot resolve enum symbol names
    // As an alternative solution, we might want to implement to_string<GemmTraits>() to get the traits string.
nvrtcGetTypeName<typename GemmTraits_>(&type_name);
#else
type_name = gemm_traits;
#endif
result_nvrtc = nvrtcCreateProgram(&program,
src,
NULL,
(int)cutlass::nvrtc::kCutlassHeaderCount,
cutlass::nvrtc::kCutlassHeaders,
cutlass::nvrtc::kCutlassHeaderNames);
check_nvrtc_error(result_nvrtc);
std::string gemm_kernel_instantiation =
"test::nvrtc::kernel::thread::testbed_kernel< " + type_name + " >";
    result_nvrtc = nvrtcAddNameExpression(program, gemm_kernel_instantiation.c_str());
    check_nvrtc_error(result_nvrtc);
const char *opts[] = {"--gpu-architecture=compute_75",
"--std=c++17",
"--include-path=/usr/local/cuda-10.1/include"};
result_nvrtc = nvrtcCompileProgram(program, 3, opts);
if (result_nvrtc != NVRTC_SUCCESS) {
size_t logSize;
nvrtcGetProgramLogSize(program, &logSize);
std::vector<char> log(logSize);
nvrtcGetProgramLog(program, log.data());
std::cout << "Compile log:" << std::endl << log.data() << std::endl;
}
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
// The lowered name is the name of the template instantiation in the generated PTX code.
char const *gemm_kernel_lowered_name;
    result_nvrtc = nvrtcGetLoweredName(program, gemm_kernel_instantiation.c_str(), &gemm_kernel_lowered_name);
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
    // Query the size of the generated PTX so that we can allocate storage and retrieve it afterwards
size_t ptx_size;
result_nvrtc = nvrtcGetPTXSize(program, &ptx_size);
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
std::vector<char> ptx(ptx_size);
result_nvrtc = nvrtcGetPTX(program, ptx.data());
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
    // Note: the nvrtc program is intentionally kept alive here because
    // gemm_kernel_lowered_name points into storage owned by it; it could be
    // destroyed safely only after cuModuleGetFunction() below.
    //nvrtcDestroyProgram(&program);
CUmodule module;
CUresult result_cuda;
result_cuda = cuModuleLoadDataEx(&module, ptx.data(), 0, 0, 0);
if (result_cuda != CUDA_SUCCESS) {
assert(0);
}
CUfunction kernel;
result_cuda = cuModuleGetFunction(&kernel, module, gemm_kernel_lowered_name);
if (result_cuda != CUDA_SUCCESS) {
assert(0);
}
void* d_a = (void*)tensor_A.device_data();
void* d_b = (void*)tensor_B.device_data();
void* d_c = (void*)tensor_C.device_data();
void* d_d = (void*)tensor_D_computed.device_data();
void* args[] = { &d_d, &d_a, &d_b, &d_c };
// CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void** kernelParams, void** extra
result_cuda = cuLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, 0 /*cudaStreamDefault*/, args, 0);
if (result_cuda != CUDA_SUCCESS) {
assert(0);
} else {
}
#endif
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cout << "CUDA ERROR: " << cudaGetErrorString(result);
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
//tensor_D_reference.fill(tensor_C.host_view());
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC> reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, Shape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
if(!passed) std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
std::cout << "passed " << passed << std::endl;
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace nvrtc
} // namespace test
| test/unit/nvrtc/thread/testbed.h/0 | {
"file_path": "test/unit/nvrtc/thread/testbed.h",
"repo_id": "test",
"token_count": 5152
} | 52 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cutlass/library/types.h>
#include <cutlass/blas3_types.h>
#include <cutlass/gemm_coord.h>

#include <stdexcept>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct MathInstructionDescription {
/// Shape of the target math instruction
cutlass::gemm::GemmCoord instruction_shape;
/// Describes the data type of the internal accumulator
NumericTypeID element_accumulator;
/// Classification of math instruction
OpcodeClassID opcode_class;
/// Type of math operation performed
MathOperationID math_operation;
//
// Methods
//
MathInstructionDescription(
cutlass::gemm::GemmCoord instruction_shape = cutlass::gemm::GemmCoord(),
NumericTypeID element_accumulator = NumericTypeID::kInvalid,
OpcodeClassID opcode_class = OpcodeClassID::kInvalid,
MathOperationID math_operation = MathOperationID::kMultiplyAdd
):
instruction_shape(instruction_shape),
element_accumulator(element_accumulator),
opcode_class(opcode_class),
math_operation(math_operation) {}
// Equality operator
inline
bool operator==(MathInstructionDescription const& rhs) const{
return (
(instruction_shape == rhs.instruction_shape) &&
(element_accumulator == rhs.element_accumulator) &&
(opcode_class == rhs.opcode_class) &&
(math_operation == rhs.math_operation));
}
// Inequality operator
inline
bool operator!=(MathInstructionDescription const& rhs) const {
return !(*this == rhs);
}
};
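//
// Example (illustrative only): the description of a 16x8x16 Tensor Core MMA
// instruction accumulating in F32 could be encoded as
//
//   MathInstructionDescription desc(
//     cutlass::gemm::GemmCoord(16, 8, 16),
//     NumericTypeID::kF32,
//     OpcodeClassID::kTensorOp,
//     MathOperationID::kMultiplyAdd);
//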
/// Structure describing the tiled structure of a GEMM-like computation
struct TileDescription {
/// Describes the shape of a threadblock (in elements)
cutlass::gemm::GemmCoord threadblock_shape;
/// Describes the number of pipeline stages in the threadblock-scoped mainloop
int threadblock_stages;
/// Number of warps in each logical dimension
cutlass::gemm::GemmCoord warp_count;
/// Core math instruction
MathInstructionDescription math_instruction;
/// Minimum compute capability (e.g. 70, 75) of a device eligible to run the operation.
int minimum_compute_capability;
/// Minimum compute capability (e.g. 70, 75) of a device eligible to run the operation.
int maximum_compute_capability;
/// Describes the shape of a cluster (in blocks)
cutlass::gemm::GemmCoord cluster_shape;
//
// Methods
//
TileDescription(
cutlass::gemm::GemmCoord threadblock_shape = cutlass::gemm::GemmCoord(),
int threadblock_stages = 0,
cutlass::gemm::GemmCoord warp_count = cutlass::gemm::GemmCoord(),
MathInstructionDescription math_instruction = MathInstructionDescription(),
int minimum_compute_capability = 0,
int maximum_compute_capability = 0,
cutlass::gemm::GemmCoord cluster_shape = cutlass::gemm::GemmCoord(1,1,1)
):
threadblock_shape(threadblock_shape),
threadblock_stages(threadblock_stages),
warp_count(warp_count),
math_instruction(math_instruction),
minimum_compute_capability(minimum_compute_capability),
maximum_compute_capability(maximum_compute_capability),
cluster_shape(cluster_shape) { }
// Equality operator
inline
bool operator==(TileDescription const& rhs) const{
return (
(threadblock_shape == rhs.threadblock_shape) &&
(threadblock_stages == rhs.threadblock_stages) &&
(warp_count == rhs.warp_count) &&
(math_instruction == rhs.math_instruction) &&
(minimum_compute_capability == rhs.minimum_compute_capability) &&
(maximum_compute_capability == rhs.maximum_compute_capability));
}
// Inequality operator
inline
bool operator!=(TileDescription const& rhs) const {
return !(*this == rhs);
}
};
/// High-level description of an operation
struct OperationDescription {
/// Unique identifier describing the operation
char const * name;
/// Operation provider
Provider provider;
/// Kind of operation
OperationKind kind;
/// Describes the tiled structure of a GEMM-like computation
TileDescription tile_description;
//
// Methods
//
OperationDescription(
char const * name = "unknown",
Provider provider = Provider::kInvalid,
OperationKind kind = OperationKind::kInvalid,
TileDescription const& tile_description = TileDescription()
):
name(name), provider(provider), kind(kind), tile_description(tile_description) { }
};
/// Structure describing the properties of a tensor
struct TensorDescription {
/// Numeric type of an individual element
NumericTypeID element;
/// Enumerant identifying the layout function for the tensor
LayoutTypeID layout;
/// Alignment restriction on pointers, strides, and extents
int alignment;
/// log2() of the maximum extent of each dimension
int log_extent_range;
/// log2() of the maximum value each relevant stride may have
int log_stride_range;
//
// Methods
//
TensorDescription(
NumericTypeID element = NumericTypeID::kInvalid,
LayoutTypeID layout = LayoutTypeID::kInvalid,
int alignment = 1,
int log_extent_range = 24,
int log_stride_range = 24
):
element(element),
layout(layout),
alignment(alignment),
log_extent_range(log_extent_range),
log_stride_range(log_stride_range) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Description of all GEMM computations
struct GemmDescription : public OperationDescription {
/// Indicates the kind of GEMM performed
GemmKind gemm_kind;
/// Describes the A operand
TensorDescription A;
/// Describes the B operand
TensorDescription B;
/// Describes the source matrix
TensorDescription C;
/// Describes the destination matrix
TensorDescription D;
/// Describes the sparse meta matrices
TensorDescription E;
/// Describes the data type of the scalars passed to the epilogue
NumericTypeID element_epilogue;
/// Describes the structure of parallel reductions
SplitKMode split_k_mode;
/// Transformation on A operand
ComplexTransform transform_A;
/// Transformation on B operand
ComplexTransform transform_B;
//
// Methods
//
GemmDescription(
GemmKind gemm_kind = GemmKind::kGemm,
TensorDescription const& A = TensorDescription(),
TensorDescription const& B = TensorDescription(),
TensorDescription const& C = TensorDescription(),
TensorDescription const& D = TensorDescription(),
NumericTypeID element_epilogue = NumericTypeID::kInvalid,
SplitKMode split_k_mode = SplitKMode::kNone,
ComplexTransform transform_A = ComplexTransform::kNone,
ComplexTransform transform_B = ComplexTransform::kNone
):
gemm_kind(gemm_kind),
A(A),
B(B),
C(C),
D(D),
element_epilogue(element_epilogue),
split_k_mode(split_k_mode),
transform_A(transform_A),
transform_B(transform_B) {}
GemmDescription(
OperationDescription op_desc,
GemmKind gemm_kind,
TensorDescription const& A,
TensorDescription const& B,
TensorDescription const& C,
TensorDescription const& D,
NumericTypeID element_epilogue,
SplitKMode split_k_mode,
ComplexTransform transform_A,
ComplexTransform transform_B
):
OperationDescription(op_desc),
gemm_kind(gemm_kind),
A(A),
B(B),
C(C),
D(D),
element_epilogue(element_epilogue),
split_k_mode(split_k_mode),
transform_A(transform_A),
transform_B(transform_B) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Description for structured sparse GEMMs.
struct SparseGemmDescription : public GemmDescription {
/// Description structure for structured sparse GEMM
SparseGemmDescription(
GemmKind gemm_kind = GemmKind::kGemm,
TensorDescription const& A = TensorDescription(),
TensorDescription const& B = TensorDescription(),
TensorDescription const& C = TensorDescription(),
TensorDescription const& D = TensorDescription(),
TensorDescription const& E = TensorDescription(),
NumericTypeID element_epilogue = NumericTypeID::kInvalid,
SplitKMode split_k_mode = SplitKMode::kNone,
ComplexTransform transform_A = ComplexTransform::kNone,
ComplexTransform transform_B = ComplexTransform::kNone
):
GemmDescription(gemm_kind, A, B, C, D, element_epilogue, split_k_mode, transform_A, transform_B)
{this->E = E;}
};
/// Description of all Reduction operations
struct ReductionDescription : public OperationDescription {
/// Describes the data type of workspace
NumericTypeID element_workspace;
/// Describes the data type of final output
NumericTypeID element_output;
/// Describes the data type of the scalars passed to the epilogue
NumericTypeID element_epilogue;
};
/// Description of all Rank K update computations (SYRK, HERK, SYR2K, HER2K)
struct RankKDescription : public OperationDescription {
/// Indicates which device template is used (universal or regular)
RankKKind rank_k_kind;
/// Number of rank update (rank k or rank 2k)
int num_ranks;
/// Describes the A operand
TensorDescription A;
/// Describes the B operand (used only for SYR2K and HER2K)
TensorDescription B;
/// Describes the source and destination matrices
TensorDescription C;
/// Describes the fill mode for matrix C
FillMode fill_mode;
/// Describes the blas mode (symmetric/hermitian)
BlasMode blas_mode;
/// Describes the data type of the scalars passed to the epilogue
NumericTypeID element_epilogue;
/// Describes the structure of parallel reductions
SplitKMode split_k_mode;
/// Transformation on A operand
ComplexTransform transform_A;
/// Transformation on B operand
ComplexTransform transform_B;
//
// Methods
//
RankKDescription(
RankKKind rank_k_kind = RankKKind::kUniversal,
int num_ranks = 1,
TensorDescription const& A = TensorDescription(),
TensorDescription const& B = TensorDescription(),
TensorDescription const& C = TensorDescription(),
FillMode fill_mode = FillMode::kInvalid,
BlasMode blas_mode = BlasMode::kInvalid,
NumericTypeID element_epilogue = NumericTypeID::kInvalid,
SplitKMode split_k_mode = SplitKMode::kNone,
ComplexTransform transform_A = ComplexTransform::kNone,
ComplexTransform transform_B = ComplexTransform::kNone
):
rank_k_kind(rank_k_kind),
num_ranks(num_ranks),
A(A),
B(B),
C(C),
fill_mode(fill_mode),
blas_mode(blas_mode),
element_epilogue(element_epilogue),
split_k_mode(split_k_mode),
transform_A(transform_A),
transform_B(transform_B) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Description of all TRMM computations
struct TrmmDescription : public OperationDescription {
/// Indicates the kind of TRMM performed
TrmmKind trmm_kind;
/// Describes the A operand
TensorDescription A;
/// Describes the side mode for matrix A
SideMode side_mode;
/// Describes the fill mode for matrix A
FillMode fill_mode;
/// Describes the diag type for matrix A
DiagType diag_type;
/// Describes the B operand
TensorDescription B;
/// Describes the source and destination matrices
TensorDescription D;
/// Describes the data type of the scalars passed to the epilogue
NumericTypeID element_epilogue;
/// Describes the structure of parallel reductions
SplitKMode split_k_mode;
/// Transformation on A operand
ComplexTransform transform_A;
//
// Methods
//
TrmmDescription(
TrmmKind trmm_kind = TrmmKind::kUniversal,
TensorDescription const& A = TensorDescription(),
SideMode side_mode = SideMode::kInvalid,
FillMode fill_mode = FillMode::kInvalid,
DiagType diag_type = DiagType::kInvalid,
TensorDescription const& B = TensorDescription(),
TensorDescription const& D = TensorDescription(),
NumericTypeID element_epilogue = NumericTypeID::kInvalid,
SplitKMode split_k_mode = SplitKMode::kNone,
ComplexTransform transform_A = ComplexTransform::kNone
):
trmm_kind(trmm_kind),
A(A),
side_mode(side_mode),
fill_mode(fill_mode),
diag_type(diag_type),
B(B),
D(D),
element_epilogue(element_epilogue),
split_k_mode(split_k_mode),
transform_A(transform_A) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Description of all SYMM/HEMM update computations
struct SymmDescription : public OperationDescription {
/// Indicates which device template is used (universal or regular)
SymmKind symm_kind;
/// Describes the A operand
TensorDescription A;
/// Describes the B operand
TensorDescription B;
/// Describes the source and destination matrices
TensorDescription C;
/// Describes the side mode for matrix A
SideMode side_mode;
/// Describes the fill mode for matrix A
FillMode fill_mode;
/// Describes the blas mode (symmetric/hermitian)
BlasMode blas_mode;
/// Describes the data type of the scalars passed to the epilogue
NumericTypeID element_epilogue;
/// Describes the structure of parallel reductions
SplitKMode split_k_mode;
/// Transformation on A operand
ComplexTransform transform_A;
/// Transformation on B operand
ComplexTransform transform_B;
//
// Methods
//
SymmDescription(
SymmKind symm_kind = SymmKind::kUniversal,
TensorDescription const& A = TensorDescription(),
TensorDescription const& B = TensorDescription(),
TensorDescription const& C = TensorDescription(),
SideMode side_mode = SideMode::kInvalid,
FillMode fill_mode = FillMode::kInvalid,
BlasMode blas_mode = BlasMode::kInvalid,
NumericTypeID element_epilogue = NumericTypeID::kInvalid,
SplitKMode split_k_mode = SplitKMode::kNone,
ComplexTransform transform_A = ComplexTransform::kNone,
ComplexTransform transform_B = ComplexTransform::kNone
):
symm_kind(symm_kind),
A(A),
B(B),
C(C),
side_mode(side_mode),
fill_mode(fill_mode),
blas_mode(blas_mode),
element_epilogue(element_epilogue),
split_k_mode(split_k_mode),
transform_A(transform_A),
transform_B(transform_B) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Description of all Conv2d operations
struct ConvDescription : public OperationDescription {
/// Describes the convolution dimension support (2D or 3D)
int conv_dim;
/// Describes the kind of convolution
ConvKind conv_kind;
/// Describes the type of iterator algorithm (analytic or precomputed)
IteratorAlgorithmID iterator_algorithm;
/// Describes the A operand
TensorDescription A;
/// Describes the B operand
TensorDescription B;
/// Describes the C operand
TensorDescription C;
/// Describes the data type of the scalars passed to the epilogue
NumericTypeID element_epilogue;
//
// Methods
//
// Returns Activation TensorDescription
TensorDescription activation() const {
switch(conv_kind) {
case library::ConvKind::kFprop : return A;
case library::ConvKind::kDgrad : return C;
case library::ConvKind::kWgrad : return B;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns Filter TensorDescription
TensorDescription filter() const {
switch(conv_kind) {
case library::ConvKind::kFprop : return B;
case library::ConvKind::kDgrad : return B;
case library::ConvKind::kWgrad : return C;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns Output TensorDescription
TensorDescription output() const {
switch(conv_kind) {
case library::ConvKind::kFprop : return C;
case library::ConvKind::kDgrad : return A;
case library::ConvKind::kWgrad : return A;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
};
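//
// For reference, the operand roles implied by the accessors above:
//
//   conv_kind | activation() | filter() | output()
//   ----------+--------------+----------+---------
//   kFprop    |      A       |    B     |    C
//   kDgrad    |      C       |    B     |    A
//   kWgrad    |      B       |    C     |    A
//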
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/include/cutlass/library/descriptions.h/0 | {
"file_path": "tools/library/include/cutlass/library/descriptions.h",
"repo_id": "tools",
"token_count": 5616
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
\file
\brief Defines a data structure in which a set of functionally equivalent library::Operation
instances may be queried.
*/
#include "cutlass/library/operation_table.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
void OperationTable::append(Manifest const &manifest) {
// Insert operations into appropriate data structure
for (auto const & operation : manifest) {
OperationDescription const &desc = operation->description();
// insert all gemm operation into operation table
if (desc.kind == OperationKind::kGemm) {
GemmDescription const &gemm_desc = static_cast<GemmDescription const &>(desc);
GemmFunctionalKey functional_key(
gemm_desc.provider,
gemm_desc.gemm_kind,
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
gemm_desc.A.element,
gemm_desc.A.layout,
gemm_desc.transform_A,
gemm_desc.B.element,
gemm_desc.B.layout,
gemm_desc.transform_B,
gemm_desc.C.element,
gemm_desc.C.layout,
gemm_desc.D.element,
gemm_desc.D.layout
);
Operation const *op = operation.get();
int cc = gemm_desc.tile_description.minimum_compute_capability;
int alignment = std::max(std::max(
gemm_desc.A.alignment, gemm_desc.B.alignment), gemm_desc.C.alignment);
GemmPreferenceKey preference_key(cc, alignment);
gemm_operations[functional_key][preference_key].push_back(op);
}
// insert all conv2d or conv3d operation into operation table
if (desc.kind == OperationKind::kConv2d || desc.kind == OperationKind::kConv3d) {
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
ConvFunctionalKey functional_key(
conv_desc.provider,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue
);
Operation const *op = operation.get();
int cc = conv_desc.tile_description.minimum_compute_capability;
ConvPreferenceKey preference_key(cc, conv_desc.iterator_algorithm);
// insert conv operation to conv2d_operations or conv3d_operations map
(desc.kind == OperationKind::kConv2d) ?
conv2d_operations[functional_key][preference_key].push_back(op) :
conv3d_operations[functional_key][preference_key].push_back(op);
}
// insert all reduction operation into operation table
if (desc.kind == OperationKind::kReduction) {
auto &reduce_desc = static_cast<library::ReductionDescription const &>(desc);
ReductionFunctionalKey functional_key(
reduce_desc.provider,
reduce_desc.element_workspace,
reduce_desc.tile_description.math_instruction.element_accumulator,
reduce_desc.element_output,
reduce_desc.element_epilogue,
library::MathOperationID::kAdd,
library::EpilogueKind::kLinearCombination
);
Operation const *op = operation.get();
reduction_operations[functional_key] = op;
}
}
}
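//
// Typical lookup pattern against the populated table (illustrative sketch;
// error handling and preference-key selection are elided):
//
//   GemmFunctionalKey key(/* provider, gemm_kind, element/layout/transform IDs */);
//   auto it = table.gemm_operations.find(key);
//   if (it != table.gemm_operations.end()) {
//     // Pick the entry whose GemmPreferenceKey (compute capability, alignment)
//     // best matches the target device, then configure and run that operation.
//   }
//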
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/operation_table.cu/0 | {
"file_path": "tools/library/src/operation_table.cu",
"repo_id": "tools",
"token_count": 1806
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuBLAS.
*/
#pragma once
#if CUTLASS_ENABLE_CUBLAS
#include <cublas_v2.h>
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "cutlass/blas3.h"
#include "options.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuBLAS status to cutlass::Status
Status get_cutlass_status(cublasStatus_t cublas);
/// Converts a cuBLAS status to cutlass::profiler::Disposition
Disposition get_cutlass_disposition(cublasStatus_t cublas_status);
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
bool get_cublas_transpose_operation(
cublasOperation_t &operation,
library::LayoutTypeID layout,
library::ComplexTransform transform = library::ComplexTransform::kNone);
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type);
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
cublasGemmAlgo_t get_cublas_gemm_algo(
int cta_m,
int cta_n,
int cta_k,
library::OpcodeClassID opcode_class);
/// Returns a status if cuBLAS can satisfy a particular GEMM description
Status cublas_satisfies(library::GemmDescription const &desc);
/// Returns a status if cuBLAS can satisfy a particular RankK description
Status cublas_satisfies(library::RankKDescription const &desc);
/// Returns a status if cuBLAS can satisfy a particular TRMM description
Status cublas_satisfies(library::TrmmDescription const &desc);
/// Returns a status if cuBLAS can satisfy a particular SYMM/HEMM description
Status cublas_satisfies(library::SymmDescription const &desc);
/// This is a helper class to create cublasHandle_t automatically on CublasCreate object creation and
/// to destroy cublasHandle_t on CublasCreate object destruction.
/// Additionally, it provides implicit cast from CublasCreate's object to cublasHandle_t's object
class CublasCreate {
private:
cublasHandle_t handle;
cublasStatus_t status;
public:
CublasCreate() {
status = cublasCreate(&handle);
}
~CublasCreate() {
cublasDestroy(handle);
}
/// Implicit cast CublasCreate object to cublasHandle_t
operator cublasHandle_t() const { return handle; }
/// returns cublasStatus_t for handle creation
cublasStatus_t get_cublas_create_status() { return status; }
};
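//
// Example usage (illustrative sketch):
//
//   CublasCreate handle;
//   if (handle.get_cublas_create_status() == CUBLAS_STATUS_SUCCESS) {
//     // The implicit conversion lets the object be passed wherever a
//     // cublasHandle_t is expected, e.g. to cublasGemmEx() or to the
//     // dispatcher functors defined below.
//   }
//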
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Selects one or more cuBLAS algorithms.
static void select_cublas_algorithms(
std::vector<cublasGemmAlgo_t> &algorithms,
Options const &options,
library::GemmDescription const &op_desc) {
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
switch (options.library.algorithm_mode) {
case AlgorithmMode::kMatching:
{
algorithms.push_back(get_cublas_gemm_algo(
op_desc.tile_description.threadblock_shape.m(),
op_desc.tile_description.threadblock_shape.n(),
op_desc.tile_description.threadblock_shape.k(),
opcode_class));
break;
}
case AlgorithmMode::kBest:
{
      // Use the algorithms listed on the command line if any were provided.
      // Otherwise, enumerate all algorithms for the opcode class and evaluate each of them.
if (options.library.algorithms.empty()) {
// Enumerate all algorithms
if (opcode_class == library::OpcodeClassID::kSimt) {
for (int algo = CUBLAS_GEMM_DEFAULT;
algo <= CUBLAS_GEMM_ALGO23;
++algo) {
algorithms.push_back(cublasGemmAlgo_t(algo));
}
}
else {
for (int algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
algo <= CUBLAS_GEMM_ALGO15_TENSOR_OP;
++algo) {
algorithms.push_back(cublasGemmAlgo_t(algo));
}
}
}
else {
// Use the listed algorithms
algorithms.reserve(options.library.algorithms.size());
for (int algo : options.library.algorithms) {
algorithms.push_back(reinterpret_cast<cublasGemmAlgo_t const &>(algo));
}
}
break;
}
case AlgorithmMode::kDefault:
{
// Use the library's default algorithm
algorithms.push_back((opcode_class == library::OpcodeClassID::kSimt ?
CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP));
break;
}
default:
{
break;
}
}
}
/// Dispatcher to cublasGemmEx()
struct cublasGemmExDispatcher {
//
// Data members
//
library::GemmUniversalConfiguration configuration;
library::GemmUniversalArguments arguments;
// cublas-specific data structures to fill cublas API call arguments
cublasOperation_t trans_A;
cublasOperation_t trans_B;
cudaDataType_t data_type_A;
cudaDataType_t data_type_B;
cudaDataType_t data_type_C;
cudaDataType_t compute_data_type;
#if (__CUDACC_VER_MAJOR__ >= 11)
cublasComputeType_t compute_type;
#endif
cublasGemmAlgo_t algo;
Status status;
//
// Methods
//
cublasGemmExDispatcher(
library::GemmDescription const &op_desc,
library::GemmUniversalConfiguration configuration_,
library::GemmUniversalArguments arguments_,
cublasGemmAlgo_t algorithm = CUBLAS_GEMM_DFALT
);
/// Executes GEMM using these arguments
cublasStatus_t operator()(cublasHandle_t handle);
};
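//
// Typical call sequence (illustrative sketch; the description, configuration,
// and arguments objects are assumed to have been populated by the profiler):
//
//   detail::cublasGemmExDispatcher gemm_op(gemm_desc, configuration, arguments);
//   if (gemm_op.status == Status::kSuccess) {
//     cublasStatus_t result = gemm_op(handle);   // handle: cublasHandle_t
//   }
//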
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Dispatcher to cublas rank k update kernels
struct cublasRankKDispatcher {
//
// Data members
//
library::RankKConfiguration configuration;
library::RankKArguments arguments;
// cublas-specific data structures to fill cublas API call arguments
cublasOperation_t trans_A;
cublasFillMode_t uplo;
cudaDataType_t data_type_A;
cudaDataType_t data_type_C;
cudaDataType_t compute_data_type;
#if (__CUDACC_VER_MAJOR__ >= 11)
cublasComputeType_t compute_type;
#endif
int num_ranks; //(rank-k or rank-2k)
BlasMode blas_mode; //(symmetric or hermitian)
Status status;
//
// Methods
//
cublasRankKDispatcher(
library::RankKDescription const &op_desc,
library::RankKConfiguration configuration_,
library::RankKArguments arguments_
);
/// Executes RankK using these arguments
cublasStatus_t operator()(cublasHandle_t handle);
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Dispatcher to cublasTrmm()
struct cublasTrmmDispatcher {
//
// Data members
//
library::TrmmConfiguration configuration;
library::TrmmArguments arguments;
// cublas-specific data structures to fill cublas API call arguments
cublasOperation_t trans_A;
cublasSideMode_t side;
cublasFillMode_t uplo;
cublasDiagType_t diag;
cudaDataType_t data_type_A;
cudaDataType_t data_type_B;
cudaDataType_t data_type_D;
cudaDataType_t compute_data_type;
#if (__CUDACC_VER_MAJOR__ >= 11)
cublasComputeType_t compute_type;
#endif
Status status;
//
// Methods
//
cublasTrmmDispatcher(
library::TrmmDescription const &op_desc,
library::TrmmConfiguration configuration_,
library::TrmmArguments arguments_
);
/// Executes TRMM using these arguments
cublasStatus_t operator()(cublasHandle_t handle);
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Dispatcher to cublas symm/hemm update kernels
struct cublasSymmDispatcher {
//
// Data members
//
library::SymmConfiguration configuration;
library::SymmArguments arguments;
// cublas-specific data structures to fill cublas API call arguments
cublasSideMode_t side;
cublasFillMode_t uplo;
cudaDataType_t data_type_A;
cudaDataType_t data_type_B;
cudaDataType_t data_type_C;
cudaDataType_t compute_data_type;
#if (__CUDACC_VER_MAJOR__ >= 11)
cublasComputeType_t compute_type;
#endif
BlasMode blas_mode; //(symmetric or hermitian)
Status status;
//
// Methods
//
cublasSymmDispatcher(
library::SymmDescription const &op_desc,
library::SymmConfiguration configuration_,
library::SymmArguments arguments_
);
/// Executes Symm using these arguments
cublasStatus_t operator()(cublasHandle_t handle);
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
} // namespace profiler
} // namespace cutlass
#endif // #if CUTLASS_ENABLE_CUBLAS
| tools/profiler/include/cutlass/profiler/cublas_helpers.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/cublas_helpers.h",
"repo_id": "tools",
"token_count": 3531
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Command line options for performance test program
*/
#include <algorithm>
#include "cutlass/cutlass.h"
#include "cutlass/version.h"
#include "cutlass/library/util.h"
#include "cutlass/profiler/options.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Newline and indent for help strings
static char const *end_of_line = "\n ";
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Device::Device(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("device", device, 0);
cudaError_t result;
result = cudaGetDeviceProperties(&properties, device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed for given device");
}
result = cudaSetDevice(device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaSetDevice() failed for given device.");
}
// Permit overriding the compute capability
if (cmdline.check_cmd_line_flag("compute-capability")) {
int cc = compute_capability();
cmdline.get_cmd_line_argument("compute-capability", cc, cc);
properties.major = cc / 10;
properties.minor = cc % 10;
}
// Permit overriding the L2 cache capacity
if (cmdline.check_cmd_line_flag("llc-capacity")) {
int llc_capacity = 0;
cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0);
if (llc_capacity >= 0) {
properties.l2CacheSize = (llc_capacity << 10);
}
}
}
void Options::Device::print_usage(std::ostream &out) const {
out << "Device:\n"
<< " --device=<int> "
<< " CUDA Device ID\n\n";
int device_count = 0;
cudaError_t result = cudaGetDeviceCount(&device_count);
if (result != cudaSuccess) {
out << " <could not query for CUDA devices>\n";
}
else {
for (int idx = 0; idx < device_count; ++idx) {
cudaDeviceProp prop;
result = cudaGetDeviceProperties(&prop, idx);
if (result != cudaSuccess) {
out << " <could not obtain device properties for device " << idx << ">" << std::endl;
break;
}
else {
out << " [" << idx << "] - "
<< prop.name << " - SM " << prop.major << "." << prop.minor << ", "
<< prop.multiProcessorCount << " SMs @ " << (prop.clockRate / 1000.0) << " MHz, "
<< "L2 cache: " << (prop.l2CacheSize >> 20) << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) << " GB"
<< std::endl;
}
}
out << "\n";
}
out
<< " --compute-capability=<int> "
<< " Override the compute capability.\n\n"
<< " --llc-capacity=<capacity in KiB> "
<< " Capacity of last-level cache in kilobytes. If this is non-zero," << end_of_line
<< " profiling phases cycle through different input tensors to induce" << end_of_line
<< " capacity misses in the L2.\n\n";
}
void Options::Device::print_device_info(std::ostream &out) const {
int num_devices;
cudaDeviceProp props;
cudaError_t result;
result = cudaGetDeviceCount(&num_devices);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetNumDevices() failed");
}
out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl;
for (int device = 0; device < num_devices; device++) {
result = cudaSetDevice(device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaSetDevice() failed for device");
}
result = cudaGetDeviceProperties(&props, device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties failed for device");
}
out << props.name << "," << props.major << props.minor << ","
<< device << "," << props.multiGpuBoardGroupID << std::endl;
}
}
void Options::Device::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "device: " << device << "\n"
<< indent_str(indent) << "clock: " << int(double(properties.clockRate) / 1000.0) << "\n"
<< indent_str(indent) << "compute-capability: " << compute_capability() << "\n";
}
/// Returns the compute capability of the listed device (e.g. 61, 60, 70, 75)
int Options::Device::compute_capability() const {
return properties.major * 10 + properties.minor;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Initialization::Initialization(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("initialization-enabled", enabled, true);
if (cmdline.check_cmd_line_flag("initialization-provider")) {
std::string str;
cmdline.get_cmd_line_argument("initialization-provider", str);
provider = library::from_string<library::Provider>(str);
if (provider == library::Provider::kInvalid) {
enabled = false;
}
else if (provider != library::Provider::kReferenceHost && provider != library::Provider::kReferenceDevice) {
throw std::runtime_error("Unsupported initialization provider specified.");
}
}
else {
provider = library::Provider::kReferenceDevice;
}
cmdline.get_cmd_line_argument("seed", seed, 2019);
if (cmdline.check_cmd_line_flag("dist")) {
// user has set the data distribution (fix data distribution once set)
fix_data_distribution = true;
// set user provided data distribution
get_distribution(cmdline, "dist", data_distribution);
}
else {
// profiler chosen data distribution (allowed to change based on numeric types)
fix_data_distribution = false;
// set uniform data distribution with range [-4, 4]
data_distribution.set_uniform(-4, 4, 0);
}
}
/// Gets the initial distribution
void Options::Initialization::get_distribution(
cutlass::CommandLine const &args,
std::string const &arg,
cutlass::Distribution &dist) {
struct {
const char *label;
cutlass::Distribution::Kind kind;
} distribution_kinds[] = {
{"uniform", cutlass::Distribution::Uniform},
{"gaussian", cutlass::Distribution::Gaussian},
{"identity", cutlass::Distribution::Identity},
{"sequential", cutlass::Distribution::Sequential},
{0, cutlass::Distribution::Invalid}
};
struct {
char const *label;
double *member;
} members[] = {
{"min", &dist.uniform.min},
{"max", &dist.uniform.max},
{"mean", &dist.gaussian.mean},
{"stddev", &dist.gaussian.stddev},
{"pnzA", &dist.gaussian.pnzA},
{"pnzB", &dist.gaussian.pnzB},
{"pnzC", &dist.gaussian.pnzC},
{"start", &dist.sequential.start},
{"delta", &dist.sequential.delta},
{0, 0}
};
  // Initialize pnz values to a default value of 100%
dist.gaussian.pnz = 100.0;
dist.gaussian.pnzA = 100.0;
dist.gaussian.pnzB = 100.0;
dist.gaussian.pnzC = 100.0;
using KeyValueVector = std::vector<std::pair<std::string, std::string> >;
KeyValueVector values;
args.get_cmd_line_argument_pairs(arg.c_str(), values);
// The parser expects the first token to be a string identifying the distribution type.
auto it = values.begin();
if (it != values.end()) {
for (int i = 0; distribution_kinds[i].label; ++i) {
if (it->first == distribution_kinds[i].label) {
dist.kind = distribution_kinds[i].kind;
break;
}
}
++it;
}
// Subsequent key-value pairs update the named field of the distribution struct.
for (; it != values.end(); ++it) {
// Integer scaling factor - if < 0, no integer rounding is performed.
if ((it->first.compare("scale") == 0) && !it->second.empty()) {
std::stringstream ss;
ss << it->second;
ss >> dist.int_scale;
continue; // next token
}
// Casts as integer without scaling
if (it->first.compare("integer") == 0) {
dist.int_scale = 0;
continue; // next token
}
// initialize other members
for (int m = 0; members[m].label; ++m) {
if (it->first == members[m].label && !it->second.empty()) {
std::stringstream ss;
ss << it->second;
ss >> *(members[m].member);
}
}
}
}
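// Illustrative sketch of the mapping performed above (hypothetical command line, not parser output):
//
//   --dist=gaussian,mean:0,stddev:3,scale:-1
//
// yields dist.kind = cutlass::Distribution::Gaussian, dist.gaussian.mean = 0,
// dist.gaussian.stddev = 3, and dist.int_scale = -1 (a negative scale disables integer
// rounding). Keys matching neither "scale", "integer", nor a member label are ignored.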
void Options::Initialization::print_usage(std::ostream &out) const {
out << "Initialization:\n"
<< " --initialization=<bool> "
<< " Enables initialization (default: true). If false, device memory is" << end_of_line
<< " not initialized after allocation.\n\n"
<< " --initialization-provider=<provider> "
<< " Selects initialization provider {host, device*}. (default: '*')\n\n"
<< " --dist=<distribution> "
<< " Data distribution of input tensors {uniform*, gaussian, identity, sequential}" << end_of_line
<< " --dist=uniform,min:<double>,max:<double>,scale:<integer>" << end_of_line
<< " --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer>,pnzA:<double>,pnzB:<double>,pnzC:<double>" << end_of_line
<< " --dist=sequential,start:<double>,delta:<double>,scale:<integer>" << end_of_line
<< " --dist=identity\n\n"
<< " --seed=<int> "
<< " Random number generator seed. Used to enforce deterministic" << end_of_line
<< " initialization.\n\n";
}
void Options::Initialization::print_options(std::ostream &out, int indent) const {
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Library::Library(cutlass::CommandLine const &cmdline) {
algorithm_mode = AlgorithmMode::kDefault;
if (cmdline.check_cmd_line_flag("library-algo-mode")) {
std::string mode = "default";
cmdline.get_cmd_line_argument("library-algo-mode", mode);
algorithm_mode = from_string<AlgorithmMode>(mode);
}
if (cmdline.check_cmd_line_flag("library-algos")) {
// If algorithms are specified, override as kBest.
algorithm_mode = AlgorithmMode::kBest;
std::vector<std::string> tokens;
cmdline.get_cmd_line_arguments("library-algos", tokens);
algorithms.reserve(tokens.size());
for (auto const & token : tokens) {
      if (token.find(":") != std::string::npos) {
        // TODO: parse tokenized ranges (e.g. "start:end")
      }
else {
int algo;
std::stringstream ss;
ss << token;
ss >> algo;
algorithms.push_back(algo);
}
}
}
}
void Options::Library::print_usage(std::ostream &out) const {
out << "Library:\n"
<< " --library-algo-mode=<mode> "
<< " Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN.\n"
<< " "
<< " mode={default*,matching,best}\n\n"
<< " --library-algos=<range-list> "
<< " If --algorithm-mode=best, permits specifying a selection of algorithms.\n\n";
}
void Options::Library::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "library-algo-mode: " << to_string(algorithm_mode) << "\n"
<< indent_str(indent) << "library-algos: ";
int j = 0;
for (int x : algorithms) {
out << (j++ ? "," : "") << x;
}
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Profiling::Profiling(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0);
cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10);
cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100);
cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50);
cmdline.get_cmd_line_argument("profiling-enabled", enabled, true);
if (cmdline.check_cmd_line_flag("providers")) {
std::vector<std::string> tokens;
cmdline.get_cmd_line_arguments("providers", tokens);
providers.clear();
for (auto const &token : tokens) {
providers.push_back(library::from_string<library::Provider>(token));
}
}
else {
providers.push_back(library::Provider::kCUTLASS);
providers.push_back(library::Provider::kCUBLAS);
providers.push_back(library::Provider::kCUDNN);
}
}
void Options::Profiling::print_usage(std::ostream &out) const {
out << "Profiling:\n"
<< " --workspace-count=<workspace count> "
<< " Number of discrete workspaces maintained to avoid cache-resident " << end_of_line
<< " If zero (default), the amount is chosen for each workload based on " << end_of_line
<< " capacity of the last-level cache.\n\n"
<< " --profiling-iterations=<iterations> "
<< " Number of iterations to profile each kernel. If zero, kernels" << end_of_line
<< " are launched up to the profiling duration.\n\n"
<< " --warmup-iterations=<iterations> "
<< " Number of iterations to execute each kernel prior to profiling.\n\n"
<< " --sleep-duration=<duration> "
<< " Number of ms to sleep between profiling periods (ms).\n\n"
<< " --profiling-enabled=<bool> "
<< " If true, profiling is actually conducted.\n\n"
;
}
void Options::Profiling::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "profiling_iterations: " << iterations << "\n"
<< indent_str(indent) << "sleep_duration: " << sleep_duration << "\n"
<< indent_str(indent) << "profiling_enabled: " << enabled << "\n"
<< indent_str(indent) << "providers: [";
int j = 0;
for (auto const & provider : providers) {
out << (j++ ? ", " : "") << library::to_string(provider);
}
out << "]\n";
}
/// Returns true if a provider is enabled
bool Options::Profiling::provider_enabled(library::Provider provider) const {
return std::find(providers.begin(), providers.end(), provider) != providers.end();
}
/// Returns the index of a provider if it is enabled
size_t Options::Profiling::index(library::Provider provider) const {
size_t idx = 0;
for (auto const & x : providers) {
if (x == provider) {
return idx;
}
++idx;
}
return idx;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Verification::Verification(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("verification-enabled", enabled, true);
if (enabled) {
cmdline.get_cmd_line_argument("verification-required", required, false);
}
cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05);
cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0);
if (cmdline.check_cmd_line_flag("save-workspace")) {
std::string value;
cmdline.get_cmd_line_argument("save-workspace", value);
save_workspace = from_string<SaveWorkspace>(value);
}
else {
save_workspace = SaveWorkspace::kNever;
}
if (cmdline.check_cmd_line_flag("verification-providers")) {
std::vector<std::string> tokens;
cmdline.get_cmd_line_arguments("verification-providers", tokens);
providers.clear();
for (auto const &token : tokens) {
library::Provider provider = library::from_string<library::Provider>(token);
if (provider != library::Provider::kInvalid) {
providers.push_back(provider);
}
}
}
else {
providers.push_back(library::Provider::kCUBLAS);
providers.push_back(library::Provider::kReferenceDevice);
providers.push_back(library::Provider::kCUDNN);
}
}
void Options::Verification::print_usage(std::ostream &out) const {
out << "Verification:\n"
<< " --verification-enabled=<bool> "
<< " Whether to perform verification checks.\n\n"
<< " --epsilon=<error> "
<< " Error threshold. Setting to zero (default) requires" << end_of_line
<< " bit-level equivalence.\n\n"
<< " --nonzero-floor=<floor> "
<< " Results whose absolute value is less than this quantity" << end_of_line
<< " are treated as zero for comparisons.\n\n"
<< " --save-workspace=<string> "
<< " Specifies when to save the GEMM inputs and results to the filesystem." << end_of_line
<< " --save-workspace=never never save workspace (default)" << end_of_line
<< " --save-workspace=incorrect save workspace for incorrect results" << end_of_line
<< " --save-workspace=always always save workspace\n\n"
<< " --verification-providers=<providers> "
<< " List of providers used to verify result. (default: '*')" << end_of_line
<< " Gemm verification-providers {cublas*}" << end_of_line
<< " Conv2d verification-providers {cudnn*, device*, host}"
<< "\n\n";
}
void Options::Verification::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "verification_enabled: " << enabled << "\n"
<< indent_str(indent) << "epsilon: " << epsilon << "\n"
<< indent_str(indent) << "save_workspace: " << to_string(save_workspace) << "\n"
<< indent_str(indent) << "verification_providers: [";
int j = 0;
for (auto const & provider : providers) {
out << (j++ ? ", " : "") << library::to_string(provider);
}
out << "]\n";
}
/// Returns true if a provider is enabled
bool Options::Verification::provider_enabled(library::Provider provider) const {
return std::find(providers.begin(), providers.end(), provider) != providers.end();
}
/// Returns the index of a provider if it is enabled
size_t Options::Verification::index(library::Provider provider) const {
size_t idx = 0;
for (auto const & x : providers) {
if (x == provider) {
return idx;
}
++idx;
}
return idx;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Report::Report(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("append", append, false);
cmdline.get_cmd_line_argument("output", output_path);
cmdline.get_cmd_line_argument("junit-output", junit_output_path);
if (cmdline.check_cmd_line_flag("tags")) {
cmdline.get_cmd_line_argument_pairs("tags", pivot_tags);
}
cmdline.get_cmd_line_argument("report-not-run", report_not_run, false);
cmdline.get_cmd_line_argument("verbose", verbose, true);
cmdline.get_cmd_line_argument("sort-results", sort_results, false);
cmdline.get_cmd_line_argument("print-kernel-before-running", print_kernel_before_running, false);
}
void Options::Report::print_usage(std::ostream &out) const {
out << "Report:\n"
<< " --append=<bool> "
<< " If true, result is appended to possibly existing file. Otherwise, " << end_of_line
<< " any existing file is overwritten.\n\n"
<< " --output=<path> "
<< " Path to output file for machine readable results. Operation kind and '.csv' is appended.\n\n"
<< " --junit-output=<path> "
<< " Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended.\n\n"
<< " --print-kernel-before-running=<bool> "
<< " Prints the name of the kernel being profiled before running the kernel." << end_of_line
<< " This is useful for determining which kernel is causing a run of the profiler to hang\n\n"
<< " --report-not-run=<bool> "
<< " If true, reports the status of all kernels including those that" << end_of_line
<< " do not satisfy the given arguments.\n\n"
<< " --tags=<column:tag,...> "
<< " Inserts leading columns in output table and uniform values for each" << end_of_line
<< " column. Useful for generating pivot tables.\n\n"
<< " --verbose=<bool> "
<< " Prints human-readable text to stdout. If false, nothing is written to stdout.\n\n"
<< " --sort-results=<bool> "
<< " Sorts results (by flops-per-byte).\n\n";
}
void Options::Report::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "append: " << append << "\n"
<< indent_str(indent) << "output: " << output_path << "\n"
<< indent_str(indent) << "junit-output: " << junit_output_path << "\n"
<< indent_str(indent) << "print-kernel-before-running: " << print_kernel_before_running << "\n"
<< indent_str(indent) << "report-not-run: " << report_not_run << "\n"
<< indent_str(indent) << "tags:\n";
for (auto const & tag : pivot_tags) {
out << indent_str(indent + 1) << tag.first << ": " << tag.second << "\n";
}
out
<< indent_str(indent) << "verbose: " << verbose << "\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::About::About(cutlass::CommandLine const &cmdline) {
help = cmdline.check_cmd_line_flag("help");
version = cmdline.check_cmd_line_flag("version");
device_info = cmdline.check_cmd_line_flag("device-info");
}
void Options::About::print_usage(std::ostream &out) const {
out << "About:\n"
<< " --version ";
print_version(out);
out << "\n";
}
void Options::About::print_version(std::ostream &out) {
out << "CUTLASS " << cutlass::getVersionString()
<< " built on " << __DATE__ << " at " << __TIME__;
  if (!cutlass::getGitRevision().empty()) out << " with commit " << cutlass::getGitRevision();
}
void Options::About::print_options(std::ostream &out, int indent) const {
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Options(cutlass::CommandLine const &cmdline):
cmdline(cmdline),
device(cmdline),
initialization(cmdline),
library(cmdline),
profiling(cmdline),
verification(cmdline),
report(cmdline),
about(cmdline) {
if (cmdline.check_cmd_line_flag("mode")) {
std::string token;
cmdline.get_cmd_line_argument("mode", token);
execution_mode = from_string<ExecutionMode>(token);
}
else {
execution_mode = ExecutionMode::kProfile;
}
// Enumerating kernels is equivalent to a dry run.
if (execution_mode == ExecutionMode::kEnumerate) {
execution_mode = ExecutionMode::kDryRun;
}
if (cmdline.check_cmd_line_flag("operation")) {
std::string str;
cmdline.get_cmd_line_argument("operation", str);
operation_kind = library::from_string<library::OperationKind>(str);
}
else if (cmdline.check_cmd_line_flag("function")) {
std::string str;
cmdline.get_cmd_line_argument("function", str);
operation_kind = library::from_string<library::OperationKind>(str);
}
else {
operation_kind = library::OperationKind::kInvalid;
}
if (cmdline.check_cmd_line_flag("operation_names")) {
cmdline.get_cmd_line_arguments("operation_names", operation_names);
}
else if (cmdline.check_cmd_line_flag("kernels")) {
cmdline.get_cmd_line_arguments("kernels", operation_names);
profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match");
}
if (cmdline.check_cmd_line_flag("ignore-kernels")) {
cmdline.get_cmd_line_arguments("ignore-kernels", excluded_operation_names);
profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match");
}
// Prevent launches on the device for anything other than CUTLASS operation
// Allow verification only on host
if (execution_mode == ExecutionMode::kTrace) {
initialization.provider = library::Provider::kReferenceHost;
verification.providers = {library::Provider::kReferenceHost};
profiling.enabled = false;
}
}
void Options::print_usage(std::ostream &out) const {
out
<< "CUTLASS Profiler\n"
<< "usage:\n\n"
<< " cutlass_profiler [options]\n\n"
<< " --help\n\n"
<< " --mode=<string> "
<< " Cutlass profiler execution mode." << end_of_line
<< " --mode=profile regular verification and profiling (default)" << end_of_line
<< " --mode=dry_run no kernels are launched or workspaces allocated" << end_of_line
<< " --mode=enumerate lists all operation kind and operations" << end_of_line
<< " --mode=trace executes a single device-side computation with" << end_of_line
<< " no other kernel launches\n\n"
<< " --device-info "
<< " Prints information on all GPUs present in the system\n\n"
<< " --operation=<operation_kind> "
<< " CUTLASS operation to profile.\n\n"
<< " --kernels=<string_list> "
<< " Filter operations by kernel names. For example, call all kernels with" << end_of_line
<< " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and \"align8\") in their" << end_of_line
<< " operation name using --kernels=\"s1688*nt, s884*tn*align8\"\n\n"
<< " --ignore-kernels=<string_list> "
<< " Excludes kernels whose names match anything in this list.\n\n"
;
//
// Detailed options
//
device.print_usage(out);
out << "\n";
initialization.print_usage(out);
out << "\n";
library.print_usage(out);
out << "\n";
profiling.print_usage(out);
out << "\n";
verification.print_usage(out);
out << "\n";
report.print_usage(out);
out << "\n";
about.print_usage(out);
out << "\n";
}
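// Illustrative invocations (problem-size flags such as --m/--n/--k are parsed by the
// individual operation profilers, not by this file; the kernel names below are examples only):
//
//   cutlass_profiler --operation=gemm --m=1024 --n=1024 --k=128
//   cutlass_profiler --kernels=s1688*nt --warmup-iterations=5 --profiling-iterations=50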
void Options::print_options(std::ostream &out) const {
out
<< "options:\n"
<< " help: " << about.help << "\n"
<< " mode: " << to_string(execution_mode) << "\n";
out
<< " device:\n";
device.print_options(out, 2);
out
<< " initialization:\n";
initialization.print_options(out, 2);
out
<< " profiling:\n";
profiling.print_options(out, 2);
out
<< " verification:\n";
verification.print_options(out, 2);
out
<< " report:\n";
report.print_options(out, 2);
}
std::string Options::indent_str(int indent) {
return std::string(indent * 2, ' ');
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/src/options.cu/0 | {
"file_path": "tools/profiler/src/options.cu",
"repo_id": "tools",
"token_count": 10447
} | 56 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
 * \brief CUDA kernels to perform layernorm on a device memory tensor with RowMajor layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief Interface for performing layernorm on a device memory tensor with RowMajor layout.
* \tparam T: data type
*/
template <typename T>
void layernorm(cutlass::MatrixCoord tensor_size,
TensorRef<T, layout::RowMajor> ref_output,
TensorRef<T, layout::RowMajor> ref_input,
TensorRef<T, layout::RowMajor> ref_gamma,
TensorRef<T, layout::RowMajor> ref_beta,
cudaStream_t stream);
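/**
 * Example usage (illustrative sketch; assumes cutlass::HostTensor from tools/util and a valid
 * CUDA stream -- a minimal sketch, not a definitive recipe):
 *
 * \code
 * #include "cutlass/util/host_tensor.h"
 *
 * int m = 4096, n = 1024;
 * cudaStream_t stream = nullptr;
 * cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> output({m, n});
 * cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> input({m, n});
 * cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> gamma({1, n});
 * cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> beta({1, n});
 * // ... fill the host tensors and call sync_device() on each ...
 * cutlass::layernorm({m, n}, output.device_ref(), input.device_ref(),
 *                    gamma.device_ref(), beta.device_ref(), stream);
 * \endcode
 */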
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with n elements ; each thread deals with ITEM_PER_THREAD elements
*/
template<typename T, int ITEM_PER_THREAD>
__global__ void layernorm_twoPassAlgo_stored_locally_e1(T* output,
const T* input,
const T* gamma,
const T* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
T local_val[ITEM_PER_THREAD];
float local_sums[1] = {0.0f};
int offset = m_idx * n;
input += offset;
output += offset;
const T zero = T(0.0f);
#pragma unroll
for (int i = 0 ; i < ITEM_PER_THREAD ; i++){
int index = tid + i*bdimx;
local_val[i] = index < n ? input[index] : zero;
local_sums[0] += static_cast<float>(local_val[i]);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
#pragma unroll
for (int i = 0 ; i < ITEM_PER_THREAD ; i++){
int index = tid + i*bdimx;
if (index < n){
const float tmp = static_cast<float>(local_val[i]) - s_mean;
local_sums[0] += tmp * tmp;
}
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
#pragma unroll
for (int i = 0 ; i < ITEM_PER_THREAD ; i++){
int index = tid + i*bdimx;
if (index < n) {
const T gamma_val = gamma[index];
const T beta_val = beta[index];
output[index] = T((static_cast<float>(local_val[i]) - s_mean) * s_variance * static_cast<float>(gamma_val) + static_cast<float>(beta_val));
}
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*2 elements;
*/
template<typename T2, typename T, int ITEM_PER_THREAD>
__global__ void layernorm_twoPassAlgo_stored_locally_e2(T2* output,
const T2* input,
const T2* gamma,
const T2* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
T2 local_val[ITEM_PER_THREAD];
const int n_2 = n / 2;
int offset = m_idx * n_2;
input += offset;
output += offset;
const T2 zero = {T(0.0f), T(0.0f)};
  #pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
local_val[i] = index < n_2 ? input[index] : zero;
local_sums[0] += static_cast<float>(local_val[i].x) + static_cast<float>(local_val[i].y);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
  #pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_2){
const float2 tmp = {static_cast<float>(local_val[i].x) - s_mean,
static_cast<float>(local_val[i].y) - s_mean};
local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y;
}
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
  #pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_2){
const T2 gamma_val = gamma[index];
const T2 beta_val = beta[index];
T2 tmp;
tmp.x = T((static_cast<float>(local_val[i].x) - s_mean)*s_variance*static_cast<float>(gamma_val.x) + static_cast<float>(beta_val.x));
tmp.y = T((static_cast<float>(local_val[i].y) - s_mean)*s_variance*static_cast<float>(gamma_val.y) + static_cast<float>(beta_val.y));
output[index] = tmp;
}
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*4 elements;
*/
template<typename T4, typename T, int ITEM_PER_THREAD>
__global__ void layernorm_twoPassAlgo_stored_locally_e4(T4* output,
const T4* input,
const T4* gamma,
const T4* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
T4 local_val[ITEM_PER_THREAD];
const int n_4 = n / 4;
int offset = m_idx * n_4;
input += offset;
output += offset;
const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)};
  #pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
local_val[i] = index < n_4 ? input[index] : zero;
local_sums[0] += static_cast<float>(local_val[i].x) + static_cast<float>(local_val[i].y) +
static_cast<float>(local_val[i].z) + static_cast<float>(local_val[i].w);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
  #pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_4){
const float4 tmp = {static_cast<float>(local_val[i].x) - s_mean,
static_cast<float>(local_val[i].y) - s_mean,
static_cast<float>(local_val[i].z) - s_mean,
static_cast<float>(local_val[i].w) - s_mean};
local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y + tmp.z * tmp.z + tmp.w * tmp.w;
}
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
  #pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_4){
const T4 gamma_val = gamma[index];
const T4 beta_val = beta[index];
T4 tmp;
tmp.x = T((static_cast<float>(local_val[i].x) - s_mean)*s_variance*static_cast<float>(gamma_val.x) + static_cast<float>(beta_val.x));
tmp.y = T((static_cast<float>(local_val[i].y) - s_mean)*s_variance*static_cast<float>(gamma_val.y) + static_cast<float>(beta_val.y));
tmp.z = T((static_cast<float>(local_val[i].z) - s_mean)*s_variance*static_cast<float>(gamma_val.z) + static_cast<float>(beta_val.z));
tmp.w = T((static_cast<float>(local_val[i].w) - s_mean)*s_variance*static_cast<float>(gamma_val.w) + static_cast<float>(beta_val.w));
output[index] = tmp;
}
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
 * block(block_size) -- each block deals with n elements; each thread strides across the row
*/
template<typename T>
__global__ void layernorm_twoPassAlgo_e1(T* output,
const T* input,
const T* gamma,
const T* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
int offset = m_idx * n;
input += offset;
output += offset;
for (int index = tid ; index < n ; index += bdimx){
float local_val = static_cast<float>(input[index]);
local_sums[0] += local_val;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
for (int index = tid ; index < n ; index += bdimx){
float local_val = static_cast<float>(input[index]);
local_val = local_val - s_mean;
local_sums[0] += local_val * local_val;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
for (int index = tid ; index < n ; index += bdimx){
const T gamma_val = gamma[index];
const T beta_val = beta[index];
const T local_val = input[index];
output[index] = T((static_cast<float>(local_val) - s_mean) * s_variance * static_cast<float>(gamma_val) + static_cast<float>(beta_val));
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
 * block(block_size) -- each block deals with n elements (two per load); each thread strides across the row
*/
template<typename T2, typename T>
__global__ void layernorm_twoPassAlgo_e2(T2* output,
const T2* input,
const T2* gamma,
const T2* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
const int n_2 = n / 2;
int offset = m_idx * n_2;
input += offset;
output += offset;
for (int index = tid; index < n_2; index += bdimx) {
const T2 local_val = input[index];
local_sums[0] += static_cast<float>(local_val.x) + static_cast<float>(local_val.y);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
for (int index = tid; index < n_2; index += bdimx) {
const T2 local_val = input[index];
const float2 tmp = {static_cast<float>(local_val.x) - s_mean,
static_cast<float>(local_val.y) - s_mean};
local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
for (int index = tid; index < n_2; index += bdimx) {
const T2 local_val = input[index];
const T2 gamma_val = gamma[index];
const T2 beta_val = beta[index];
T2 tmp;
tmp.x = T((static_cast<float>(local_val.x) - s_mean)*s_variance*static_cast<float>(gamma_val.x) + static_cast<float>(beta_val.x));
tmp.y = T((static_cast<float>(local_val.y) - s_mean)*s_variance*static_cast<float>(gamma_val.y) + static_cast<float>(beta_val.y));
output[index] = tmp;
}
}
template <typename T>
void layernorm(cutlass::MatrixCoord tensor_size,
TensorRef<T, layout::RowMajor> ref_output,
TensorRef<T, layout::RowMajor> ref_input,
TensorRef<T, layout::RowMajor> ref_gamma,
TensorRef<T, layout::RowMajor> ref_beta,
cudaStream_t stream){
const int m = tensor_size.row();
const int n = tensor_size.column();
T* output = ref_output.data();
const T* input = ref_input.data();
const T* gamma = ref_gamma.data();
const T* beta = ref_beta.data();
dim3 grid(m);
dim3 block((n + 31)/32*32);
if (block.x > 1024){
block.x = 1024;
}
  // TODO: better configurations likely exist for other problem shapes; the branches below are
  //       sample heuristics that illustrate how the kernels can be dispatched.
  // TODO: storing values in registers (the *_stored_locally_* kernels) reduces reloads from
  //       global memory and speeds up the kernels.
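  // Worked example of the heuristic below (illustrative): for T = float and n = 1024, the
  // first branch applies (n % 4 == 0 and 128 <= n <= 4096), so the float4 kernel launches with
  // block.x = (1024/4 + 31)/32*32 = 256 threads and ITEM_PER_THREAD = 1. For n = 1002
  // (divisible by 2 but not 4, and n/2 <= 1024), the float2 kernel runs with
  // block.x = (501 + 31)/32*32 = 512 threads.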
if ((n % 4 == 0) && (n >= 128) && (n <= 4096)) {
block.x = (n/4 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e4<float4, float, 1><<<grid, block, 0, stream>>>(
(float4*)output,
(const float4*)input,
(const float4*)gamma,
(const float4*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e4<half4, half, 1><<<grid, block, 0, stream>>>(
(half4*)output,
(const half4*)input,
(const half4*)gamma,
(const half4*)beta,
m,
n);
}
} //if ((n % 4 == 0) && (n >= 128) && (n <= 4096))
else if (n % 2 == 0) {
if (n / 2 <= 1024) {
block.x = (n/2 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 1><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} //if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 1><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n / 2 <= 1024)
else if (n <= 8192) {
block.x = ((n + 7)/8 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 4><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 4><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n <= 8192)
else if (n <= 16384) {
block.x = ((n + 15)/ 16 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 8><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 8><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n <= 16384)
else if (n <= 32768) {
block.x = ((n + 31)/32 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 16><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 16><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n <= 32768)
else {
if (block.x > 512)
block.x = 512;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_e2<float2, float><<<grid, block, 0, stream>>>(
(float2 *)output,
(const float2 *)input,
(const float2 *)gamma,
(const float2 *)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_e2<half2, half><<<grid, block, 0, stream>>>(
(half2 *)output,
(const half2 *)input,
(const half2 *)gamma,
(const half2 *)beta,
m,
n);
}
}
} // if (n % 2 == 0)
else {
if (n <= 1024) {
layernorm_twoPassAlgo_stored_locally_e1<T, 1><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 1024)
else if (n <= 8192) {
block.x = ((n + 7)/8 + 31)/32*32;
layernorm_twoPassAlgo_stored_locally_e1<T, 8><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 8192)
else if (n <= 16384) {
      block.x = ((n + 15)/16 + 31)/32*32;
layernorm_twoPassAlgo_stored_locally_e1<T, 16><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 16384)
else if (n <= 32768) {
block.x = ((n + 31)/32 + 31)/32*32;
layernorm_twoPassAlgo_stored_locally_e1<T, 32><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 32768)
else{
if (block.x > 512) {
block.x = 512;
}
layernorm_twoPassAlgo_e1<<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
}
}
}
} //namespace cutlass
| tools/util/include/cutlass/util/device_layernorm.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_layernorm.h",
"repo_id": "tools",
"token_count": 10256
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdexcept>
#include "cutlass/cutlass.h"
#include "cutlass/util/reference/device/kernel/tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Launches a kernel calling a functor for each element in a tensor's index space.
template <typename Func, int Rank, typename Params>
struct TensorForEach {
/// Constructor performs the operation.
TensorForEach(
Coord<Rank> size, Params params = Params(),
int grid_size = 0, int block_size = 0,
cudaStream_t stream = nullptr) {
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::TensorForEach<Func, Rank, Params>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::TensorForEach<Func, Rank, Params><<< grid, block, 0, stream >>>(size, params);
}
};
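/**
 * Example usage (illustrative sketch; ScaleFunc and its Params are hypothetical, and the functor
 * contract is assumed to follow kernel::TensorForEach -- constructed from Params and invoked once
 * per coordinate):
 *
 * \code
 * struct ScaleFunc {
 *   struct Params { float *ptr; int ldm; float alpha; };
 *   Params params;
 *   CUTLASS_DEVICE ScaleFunc(Params params): params(params) { }
 *   CUTLASS_DEVICE void operator()(cutlass::Coord<2> const &coord) {
 *     params.ptr[coord[0] * params.ldm + coord[1]] *= params.alpha;
 *   }
 * };
 *
 * // Scales an 8 x 16 row-major block of device memory by 2.
 * cutlass::reference::device::TensorForEach<ScaleFunc, 2, ScaleFunc::Params>(
 *     cutlass::make_Coord(8, 16), ScaleFunc::Params{device_ptr, 16, 2.0f});
 * \endcode
 */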
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Launches a kernel calling a functor for each element along a tensor's diagonal
template <typename Func, int Rank, typename Params>
struct TensorDiagonalForEach {
/// Constructor performs the operation
TensorDiagonalForEach(
Coord<Rank> size, Params params = Params(),
int start = 0, int end = -1,
int block_size = 128, cudaStream_t stream = nullptr) {
if (end < 0) {
end = size.min();
}
dim3 block(block_size, 1, 1);
dim3 grid((end - start + block_size - 1) / block_size, 1, 1);
kernel::TensorDiagonalForEach<Func, Rank, Params><<< grid, block, 0, stream >>>(
size, params, start, end);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
struct BlockForEach {
/// Constructor performs the operation.
BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params = typename Func::Params(),
int grid_size = 0,
int block_size = 0,
cudaStream_t stream = nullptr) {
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::BlockForEach<Element, Func>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::BlockForEach<Element, Func><<< grid, block, 0, stream >>>(ptr, capacity, params);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/tensor_foreach.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/tensor_foreach.h",
"repo_id": "tools",
"token_count": 1633
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines host-side elementwise operations on TensorView.
*/
#pragma once
// Standard Library includes
#include <utility>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/relatively_equal.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/util/distribution.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorEqualsFunc {
//
// Data members
//
TensorView<Element, Layout> lhs;
TensorView<Element, Layout> rhs;
bool result;
/// Ctor
TensorEqualsFunc(): result(true) { }
/// Ctor
TensorEqualsFunc(
TensorView<Element, Layout> const &lhs_,
TensorView<Element, Layout> const &rhs_
) :
lhs(lhs_), rhs(rhs_), result(true) { }
/// Visits a coordinate
void operator()(Coord<Layout::kRank> const &coord) {
Element lhs_ = lhs.at(coord);
Element rhs_ = rhs.at(coord);
if (lhs_ != rhs_) {
result = false;
}
}
/// Returns true if equal
operator bool() const {
return result;
}
};
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorRelativelyEqualsFunc {
//
// Data members
//
TensorView<Element, Layout> lhs;
TensorView<Element, Layout> rhs;
Element epsilon;
Element nonzero_floor;
bool result;
/// Ctor
TensorRelativelyEqualsFunc(
TensorView<Element, Layout> const &lhs_,
TensorView<Element, Layout> const &rhs_,
Element epsilon_,
Element nonzero_floor_
) :
lhs(lhs_),
rhs(rhs_),
epsilon(epsilon_),
nonzero_floor(nonzero_floor_),
result(true) { }
/// Visits a coordinate
void operator()(Coord<Layout::kRank> const &coord) {
Element lhs_ = lhs.at(coord);
Element rhs_ = rhs.at(coord);
if (!relatively_equal(lhs_, rhs_, epsilon, nonzero_floor)) {
result = false;
}
}
/// Returns true if equal
operator bool() const {
return result;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two tensor views are equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorEquals(
TensorView<Element, Layout> const &lhs,
TensorView<Element, Layout> const &rhs) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return false;
}
detail::TensorEqualsFunc<Element, Layout> func(lhs, rhs);
TensorForEach(
lhs.extent(),
func
);
return bool(func);
}
/// Returns true if two tensor views are equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorEquals(
TensorViewPlanarComplex<Element, Layout> const &lhs,
TensorViewPlanarComplex<Element, Layout> const &rhs) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return false;
}
detail::TensorEqualsFunc<Element, Layout> real_func(
{lhs.data(), lhs.layout(), lhs.extent()},
{rhs.data(), rhs.layout(), rhs.extent()}
);
TensorForEach(
lhs.extent(),
real_func
);
if (!bool(real_func)) {
return false;
}
detail::TensorEqualsFunc<Element, Layout> imag_func(
{lhs.data() + lhs.imaginary_stride(), lhs.layout(), lhs.extent()},
{rhs.data() + rhs.imaginary_stride(), rhs.layout(), rhs.extent()}
);
TensorForEach(
lhs.extent(),
imag_func
);
return bool(imag_func);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two tensor views are relatively equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorRelativelyEquals(
TensorView<Element, Layout> const &lhs,
TensorView<Element, Layout> const &rhs,
Element epsilon,
Element nonzero_floor) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return false;
}
detail::TensorRelativelyEqualsFunc<Element, Layout> func(lhs, rhs, epsilon, nonzero_floor);
TensorForEach(
lhs.extent(),
func
);
return bool(func);
}
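/**
 * Example usage (illustrative sketch): verifying a computed tensor against a host reference with
 * a relative tolerance. The epsilon and nonzero-floor values mirror the profiler defaults shown
 * earlier; the exact comparison semantics are those of cutlass::relatively_equal.
 *
 * \code
 * bool passed = cutlass::reference::host::TensorRelativelyEquals(
 *     reference.host_view(), computed.host_view(),
 *     cutlass::half_t(0.05f), cutlass::half_t(1.0f / 256.0f));
 * \endcode
 */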
/// Returns true if two tensor views are relatively equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorRelativelyEquals(
TensorViewPlanarComplex<Element, Layout> const &lhs,
TensorViewPlanarComplex<Element, Layout> const &rhs,
Element epsilon,
Element nonzero_floor) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return false;
}
detail::TensorRelativelyEqualsFunc<Element, Layout> real_func(
{lhs.data(), lhs.layout(), lhs.extent()},
{rhs.data(), rhs.layout(), rhs.extent()},
epsilon,
nonzero_floor
);
TensorForEach(
lhs.extent(),
real_func
);
if (!bool(real_func)) {
return false;
}
  detail::TensorRelativelyEqualsFunc<Element, Layout> imag_func(
{lhs.data() + lhs.imaginary_stride(), lhs.layout(), lhs.extent()},
{rhs.data() + rhs.imaginary_stride(), rhs.layout(), rhs.extent()},
epsilon,
nonzero_floor
);
TensorForEach(
lhs.extent(),
imag_func
);
return bool(imag_func);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two tensor views are NOT equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorNotEquals(
TensorView<Element, Layout> const &lhs,
TensorView<Element, Layout> const &rhs) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return true;
}
detail::TensorEqualsFunc<Element, Layout> func(lhs, rhs);
TensorForEach(
lhs.extent(),
func
);
return !bool(func);
}
/// Returns true if two planar-complex tensor views are NOT equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorNotEquals(
TensorViewPlanarComplex<Element, Layout> const &lhs,
TensorViewPlanarComplex<Element, Layout> const &rhs) {
return !TensorEquals(lhs, rhs);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorContainsFunc {
//
// Data members
//
TensorView<Element, Layout> view;
Element value;
bool contains;
Coord<Layout::kRank> location;
//
// Methods
//
/// Ctor
TensorContainsFunc(): contains(false) { }
/// Ctor
TensorContainsFunc(
TensorView<Element, Layout> const &view_,
Element value_
) :
view(view_), value(value_), contains(false) { }
/// Visits a coordinate
void operator()(Coord<Layout::kRank> const &coord) {
if (view.at(coord) == value) {
if (!contains) {
location = coord;
}
contains = true;
}
}
  /// Returns true if the value was found
operator bool() const {
return contains;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if a value is present in a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorContains(
TensorView<Element, Layout> const & view,
Element value) {
detail::TensorContainsFunc<Element, Layout> func(
view,
value
);
TensorForEach(
view.extent(),
func
);
return bool(func);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a pair containing a boolean indicating whether a value exists in a tensor and the
/// location of the first occurrence. If the value is not contained in the tensor, the second
/// element of the pair is undefined.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
std::pair<bool, Coord<Layout::kRank> > TensorFind(
TensorView<Element, Layout> const & view,
Element value) {
detail::TensorContainsFunc<Element, Layout> func(
view,
value
);
TensorForEach(
view.extent(),
func
);
return std::make_pair(bool(func), func.location);
}
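/**
 * Example usage (illustrative sketch) for a rank-2 view:
 *
 * \code
 * auto found = cutlass::reference::host::TensorFind(tensor.host_view(), cutlass::half_t(0));
 * if (found.first) {
 *   cutlass::Coord<2> location = found.second;   // coordinate of the first occurrence
 * }
 * \endcode
 */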
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/tensor_compare.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_compare.h",
"repo_id": "tools",
"token_count": 3690
} | 59 |
![ALT](./media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS")
[README](./README.md#documentation) > **Contributors**
# CUTLASS Developers and Contributors
This is the official list of CUTLASS developers and contributors.
## DEVELOPERS
Vijay Thakkar<br />
Pradeep Ramani<br />
Cris Cecka<br />
Aniket Shivam<br />
Jack Kosaian<br />
Mark Hoemmen<br />
Richard Cai<br />
Honghao Lu<br />
Ethan Yan<br />
Haicheng Wu<br />
Andrew Kerr<br />
Dustyn Blasig<br />
Fengqi Qiao<br />
Duane Merrill<br />
Yujia Zhai<br />
Rawn Henry<br />
Sergey Klevtsov<br />
Shang Zhang<br />
Piotr Majcher<br />
Paul Springer<br />
Markus Hohnerbach<br />
Jin Wang<br />
Aditya Atluri<br />
## CuTe
Cris Cecka<br />
Vijay Thakkar<br />
## CUTLASS Product Manager
Matthew Nicely<br />
## Former CUTLASS Developers
Manish Gupta<br />
Naila Farooqui<br />
David Tanner<br />
Manikandan Ananth<br />
Zhaodong Chen<br />
Chinmay Talegaonkar<br />
## CONTRIBUTORS
Timothy Costa<br />
Julien Demouth<br />
Brian Fahs<br />
Michael Garland<br />
Michael Goldfarb<br />
Mostafa Hagog<br />
Fei Hu<br />
Alan Kaatz<br />
Tina Li<br />
Timmy Liu<br />
Wei Liu<br />
Tim Martin<br />
Duane Merrill<br />
Kevin Siu<br />
Markus Tavenrath<br />
John Tran<br />
Vicki Wang<br />
Junkai Wu<br />
Fung Xie<br />
Albert Xu<br />
Yang Xu<br />
Jack Yang<br />
Scott Yokim<br />
Xiuxia Zhang<br />
Nick Zhao<br />
## ACKNOWLEDGEMENTS
Girish Bharambe<br />
Luke Durant<br />
Carter Edwards<br />
Olivier Giroux<br />
Stephen Jones<br />
Rishkul Kulkarni<br />
Bryce Lelbach<br />
Joel McCormack<br />
Kyrylo Perelygin<br />
Sean Treichler<br />
| CONTRIBUTORS.md/0 | {
"file_path": "CONTRIBUTORS.md",
"repo_id": "CONTRIBUTORS.md",
"token_count": 657
} | 0 |
theme: jekyll-theme-minimal | docs/_config.yml/0 | {
"file_path": "docs/_config.yml",
"repo_id": "docs",
"token_count": 10
} | 1 |
var searchData=
[
['floatroundstyle',['FloatRoundStyle',['../namespacecutlass.html#aabe6b8ce223bf05f65a4721a3f5447a6',1,'cutlass']]]
];
| docs/search/enums_1.js/0 | {
"file_path": "docs/search/enums_1.js",
"repo_id": "docs",
"token_count": 59
} | 2 |
var searchData=
[
['gaussian',['Gaussian',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa39890d8be86d514207259b1b5dca3ed5',1,'cutlass::Distribution']]]
];
| docs/search/enumvalues_0.js/0 | {
"file_path": "docs/search/enumvalues_0.js",
"repo_id": "docs",
"token_count": 82
} | 3 |
var searchData=
[
['batched_5freduction_2eh',['batched_reduction.h',['../batched__reduction_8h.html',1,'']]],
['batched_5freduction_5ftraits_2eh',['batched_reduction_traits.h',['../batched__reduction__traits_8h.html',1,'']]]
];
| docs/search/files_1.js/0 | {
"file_path": "docs/search/files_1.js",
"repo_id": "docs",
"token_count": 97
} | 4 |
var searchData=
[
['fast_5fmath_2eh',['fast_math.h',['../fast__math_8h.html',1,'']]],
['fragment_5fiterator_5fcomplex_5ftensor_5fop_2eh',['fragment_iterator_complex_tensor_op.h',['../fragment__iterator__complex__tensor__op_8h.html',1,'']]],
['fragment_5fiterator_5fsimt_2eh',['fragment_iterator_simt.h',['../fragment__iterator__simt_8h.html',1,'']]],
['fragment_5fiterator_5ftensor_5fop_2eh',['fragment_iterator_tensor_op.h',['../fragment__iterator__tensor__op_8h.html',1,'']]],
['fragment_5fiterator_5fvolta_5ftensor_5fop_2eh',['fragment_iterator_volta_tensor_op.h',['../fragment__iterator__volta__tensor__op_8h.html',1,'']]],
['fragment_5fiterator_5fwmma_5ftensor_5fop_2eh',['fragment_iterator_wmma_tensor_op.h',['../fragment__iterator__wmma__tensor__op_8h.html',1,'']]],
['functional_2eh',['functional.h',['../functional_8h.html',1,'']]]
];
| docs/search/files_5.js/0 | {
"file_path": "docs/search/files_5.js",
"repo_id": "docs",
"token_count": 381
} | 5 |
var searchData=
[
['output_5ftile_5fthread_5fmap_2eh',['output_tile_thread_map.h',['../output__tile__thread__map_8h.html',1,'']]]
];
| docs/search/files_d.js/0 | {
"file_path": "docs/search/files_d.js",
"repo_id": "docs",
"token_count": 59
} | 6 |
var indexSectionsWithContent =
{
0: "_abcdefghiklmnopqrstuvwxy~",
1: "abcdefghiklmnoprstuvwx",
2: "c",
3: "abcdefghiklmnoprstvw",
4: "_abcdefghiklmnopqrstuvw~",
5: "abcdefghiklmnoprstuvw",
6: "abcdefghiklmnoprstuvwy",
7: "cfgklmnos",
8: "gikrsuv",
9: "_cns",
10: "p"
};
var indexSectionNames =
{
0: "all",
1: "classes",
2: "namespaces",
3: "files",
4: "functions",
5: "variables",
6: "typedefs",
7: "enums",
8: "enumvalues",
9: "defines",
10: "groups"
};
var indexSectionLabels =
{
0: "All",
1: "Classes",
2: "Namespaces",
3: "Files",
4: "Functions",
5: "Variables",
6: "Typedefs",
7: "Enumerations",
8: "Enumerator",
9: "Macros",
10: "Modules"
};
| docs/search/searchdata.js/0 | {
"file_path": "docs/search/searchdata.js",
"repo_id": "docs",
"token_count": 362
} | 7 |
var searchData=
[
['yes',['yes',['../structcutlass_1_1platform_1_1is__base__of__helper.html#ac1cf3f804e7686213fd42c678cc6d669',1,'cutlass::platform::is_base_of_helper']]]
];
| docs/search/typedefs_15.js/0 | {
"file_path": "docs/search/typedefs_15.js",
"repo_id": "docs",
"token_count": 79
} | 8 |
var searchData=
[
['kernelclass',['KernelClass',['../structcutlass_1_1reduction_1_1BatchedReductionTraits.html#a085c72d54426f5eb60f5bffa9c383229',1,'cutlass::reduction::BatchedReductionTraits']]]
];
| docs/search/typedefs_9.js/0 | {
"file_path": "docs/search/typedefs_9.js",
"repo_id": "docs",
"token_count": 83
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
// Run tests on GPUs
int testRun(int arch, std::vector<bool (*)()> & test_funcs, const std::string & test_name) {
bool supported = false;
int arch_major = arch / 10;
int arch_minor = arch - arch / 10 * 10;
if(arch_major >= 8) {
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0)) {
supported = true;
}
}
else if(arch_major >= 7) {
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)) {
supported = true;
}
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!(props.major == arch_major && props.minor == arch_minor)) {
supported = false;
}
if (!supported) {
// Returning zero so this test passes on older Toolkits; in that case its actions are a no-op.
std::cout << "This example isn't supported on the current architecture" << std::endl;
return 0;
}
bool pass = true;
std::cout << "Device: " << props.name << std::endl;
std::cout << "Arch: SM" << arch << std::endl;
std::cout << "Test: " << test_name << std::endl;
for(auto func : test_funcs) {
pass &= func();
}
if(pass)
return 0;
else
return -1;
}
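// Minimal usage sketch (illustrative only; `run_fused_conv2d_fprop` is a hypothetical test
// function returning true on success), e.g. from main():
//
//   std::vector<bool (*)()> funcs;
//   funcs.push_back(&run_fused_conv2d_fprop);
//   return testRun(80, funcs, "fused conv2d fprop");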
| examples/13_two_tensor_op_fusion/test_run.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/test_run.h",
"repo_id": "examples",
"token_count": 1129
} | 10 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using Element = cutlass::Quaternion<float>;
using ElementAccumulator = Element; // Data type of accumulator
using ElementComputeEpilogue = Element; // Data type of epilogue computation (alpha, beta)
using ElementInputA = Element; // Data type of elements in input tensor
using ElementInputB = Element; // Data type of elements in input tensor
using ElementOutput = Element; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm50;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // SIMT instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 2;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha_w", alpha.w());
cmd.get_cmd_line_argument("alpha_x", alpha.x());
cmd.get_cmd_line_argument("alpha_y", alpha.y());
cmd.get_cmd_line_argument("alpha_z", alpha.z());
cmd.get_cmd_line_argument("beta_w", beta.w());
cmd.get_cmd_line_argument("beta_x", beta.x());
cmd.get_cmd_line_argument("beta_y", beta.y());
cmd.get_cmd_line_argument("beta_z", beta.z());
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "22_quaternion_conv example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/22_quaternion_conv/22_quaternion_conv --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/22_quaternion_conv/22_quaternion_conv --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds = NPQK * CRS * 16 (each quaternion multiply-add expands to 16 real multiply-adds)
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()) * 16;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
7,
-8,
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
7,
-8,
0);
// Fill tensor C on host with zeros
cutlass::reference::host::TensorFill(
tensor_c.host_view());
// Fill tensor C for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_c.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_ref_c.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_c.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_c.host_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_c.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_c.host_view(),
tensor_ref_c.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "22_quaternion_conv_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/22_quaternion_conv/quaternion_conv.cu/0 | {
"file_path": "examples/22_quaternion_conv/quaternion_conv.cu",
"repo_id": "examples",
"token_count": 8258
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example shows how to compute the conv2d gradient with respect to the weight (wgrad). In wgrad, the K dimension of
the implicit GEMM, corresponding to the sequential reduction loop, is very large (N * P * Q). Split-k with parallel
reduction is highly effective for such cases. Given the split_k_slices parameter, it partitions the K loop into
split_k_slices chunks and computes partial reductions in parallel across different blocks. After that,
a parallel reduction kernel is launched to accumulate partial reductions.
In practice, wgrad requires fp32 accumulation to avoid overflow. When the input is fp16, some care is needed
to correctly instantiate the GEMM template.
*/
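/*
  Illustrative sizing sketch (numbers assumed for illustration, not taken from this example's
  defaults): for N=32 output gradients with P=Q=224, the wgrad GEMM reduction extent is
  GEMM_K = N * P * Q = 32 * 224 * 224 = 1,605,632. With --split-k-slices=8, each partial GEMM
  reduces roughly GEMM_K / 8 = 200,704 terms, and the ReduceSplitK kernel configured below then
  sums the eight partial accumulator tiles into the final filter gradient.
*/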
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
// In Wgrad, fp32 accumulation is necessary in practice.
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = cutlass::half_t; // Data type of elements in output tensor
using ElementC = ElementOutput;
using ElementCompute = ElementComputeEpilogue;
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// We need two epilogue functors - one for GEMM and another for the final reduction.
// The epilogue for GEMM is not used, but needed to instantiate the CUTLASS kernel template.
// Note that, when the input is fp16 and accumulation is fp32, the output of GEMM needs to be fp32,
// the final reduction is done in fp32, and the reduction epilogue converts fp32 outputs to fp16.
// Therefore, the output type of the GEMM epilogue is ElementCompute, not ElementOutput.
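// Illustrative data flow for this configuration (summary of the comment above):
//   fp16 A/B -> implicit GEMM with fp32 accumulation -> fp32 partial tiles in the workspace
//   -> ReduceSplitK sums the partials in fp32 -> EpilogueOpReduction applies alpha/beta
//   and converts the result to fp16 D.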
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOpGEMM = cutlass::epilogue::thread::LinearCombination<
ElementCompute, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementCompute>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
// The epilogue functor for reduction. This is the one that is actually used.
using EpilogueOpReduction = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dWgradKernel = typename cutlass::conv::kernel::DefaultConv2dWgrad<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementAccumulator, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOpGEMM,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dWgradKernel>;
using EpilogueOutputOp = EpilogueOpReduction;
/// Reduction kernel
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
using ReductionStrideIndex = typename ReductionDevice::StrideIndex;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
int split_k_slices;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
split_k_slices(8),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size,
cutlass::MatrixCoord stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("split-k-slices", split_k_slices);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "30_wgrad_split_k example\n\n"
<< " This example shows how to compute conv2d gradient with respect to weight (wgrad).\n"
<< " In wgrad, the K dimension of impligit GEMM, corresponding to the sequential reduction loop, is very large (N * P * Q).\n"
<< " Split-k with parallel reduction is highly effective for such cases.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --split-k-slices=<int> Split-k factor \n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/30_wgrad_split_k/30_wgrad_split_k --n=32 --h=224 --w=224 --c=128 --k=256 --r=3 --s=3 --split-k-slices=8\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride.row() << ","
<< options.conv_stride.column() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
// Inputs are the output gradient and the original activation.
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size);
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C, D on host with zeros
cutlass::reference::host::TensorFill(tensor_c.host_view());
cutlass::reference::host::TensorFill(tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Partition the GEMM K loop into split_k_slices chunks
int split_k_slices = options.split_k_slices;
// Construct Conv2dProblemSize with user defined output size
// Do not forget to pass the last argument.
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
using cutlass::layout::TensorNHWC;
cutlass::conv::SplitKMode const split_k_mode = cutlass::conv::SplitKMode::kParallel;
// Since the epilogue is not computed after GEMM, there is no need to pass the C tensor and
// alpha and beta can be set to 1 and 0 respectively.
// Moreover, since the output will be written to the workspace, there is no need to pass
// the D tensor as well.
// Do not forget to pass the last argument.
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
{nullptr, TensorNHWC()},
{nullptr, TensorNHWC()},
{ElementCompute(1), ElementCompute(0)},
split_k_mode
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm;
size_t workspace_size = implicit_gemm.get_workspace_size(arguments);
// Split-K requires non-zero workspace size. The workspace size grows linearly with split_k_slices.
std::cout << "split-k-slices: " << split_k_slices << std::endl;
std::cout << "workspace size: " << workspace_size << std::endl;
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm.can_implement(arguments);
CUTLASS_CHECK(result.status);
// After the workspace is allocated, we point the GEMM destination pointer to the workspace.
TensorNHWC layout_D{TensorNHWC::packed(options.filter_size)};
arguments.ref_D.reset(reinterpret_cast<ElementCompute*>(workspace.get()), layout_D);
result.status = implicit_gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm();
CUTLASS_CHECK(result.status);
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// Do reduction
ReductionDevice reduction_op;
auto& status = result.status;
static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemm::kConvolutionalOperator;
typename ReductionDevice::Arguments reduction_args(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(),
problem_size.split_k_slices,
cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size),
// Reduction input
{
reinterpret_cast<ElementAccumulator*> (workspace.get()),
ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx])
},
// Destination
{
tensor_d.device_data(),
ReductionStrideIndex(tensor_d.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx])
},
// Source
{
tensor_c.device_data(),
ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx])
},
{options.alpha, options.beta}
);
status = reduction_op.initialize(reduction_args, nullptr);
status = reduction_op();
}
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute with reference implementation
cutlass::reference::device::Conv2dWgrad<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_c.sync_host();
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "30_wgrad_split_k_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 408};
struct Benchmark {
int h, w, c, k, r, s, stride_h, stride_w;
} layers[] = {
{56, 56, 64, 256, 1, 1, 1, 1},
{56, 56, 64, 64, 1, 1, 1, 1},
{56, 56, 64, 64, 3, 3, 1, 1},
{56, 56, 256, 64, 1, 1, 1, 1},
{56, 56, 256, 512, 1, 1, 2, 2},
{56, 56, 256, 128, 1, 1, 1, 1},
{56, 56, 128, 128, 3, 3, 2, 2},
{28, 28, 128, 512, 1, 1, 1, 1},
{28, 28, 512, 128, 1, 1, 1, 1},
{28, 28, 128, 128, 3, 3, 1, 1},
{28, 28, 512, 1024, 1, 1, 2, 2},
{28, 28, 512, 256, 1, 1, 1, 1},
{28, 28, 256, 256, 3, 3, 2, 2},
{14, 14, 256, 1024, 1, 1, 1, 1},
{14, 14, 1024, 256, 1, 1, 1, 1},
{14, 14, 256, 256, 3, 3, 1, 1},
{14, 14, 1024, 2048, 1, 1, 2, 2},
{14, 14, 1024, 512, 1, 1, 1, 1},
{14, 14, 512, 512, 3, 3, 2, 2},
{ 7, 7, 512, 2048, 1, 1, 1, 1},
{ 7, 7, 2048, 512, 1, 1, 1, 1},
{ 7, 7, 512, 512, 3, 3, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c},
{layer.k, layer.r, layer.s, layer.c},
{layer.stride_h, layer.stride_w});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/30_wgrad_split_k/30_wgrad_split_k.cu/0 | {
"file_path": "examples/30_wgrad_split_k/30_wgrad_split_k.cu",
"repo_id": "examples",
"token_count": 10172
} | 12 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class CustomMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<
Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
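// Worked example (illustrative only): with Shape = GemmShape<128, 128, 32> and
// WarpGemm = GemmShape<64, 64, 32>, WarpCount is <2, 2, 1> (four warps per CTA); if
// Operator::Policy::MmaShape::kK is 16, kWarpGemmIterations is 32 / 16 = 2.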
/// Number of stages
static int const kStages = Stages;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
template <typename Element, typename OperandShape, typename OperandLayout>
struct OperandSharedStorage {
AlignedBuffer<Element, OperandShape::kCount> buffer;
using TensorRef = TensorRef<Element, OperandLayout>;
CUTLASS_DEVICE
static OperandLayout Layout() {
return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn});
}
/// Returns a TensorRef to the operand
CUTLASS_HOST_DEVICE
TensorRef ref() {
return TensorRef{buffer.data(), Layout()};
}
};
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<
Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB = MatrixShape<
Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
using SharedStorageA = OperandSharedStorage<
typename Operator::ElementA,
ShapeA,
typename Operator::LayoutA>;
using SharedStorageB = OperandSharedStorage<
typename Operator::ElementB,
ShapeB,
typename Operator::LayoutB>;
using TensorRefA = typename SharedStorageA::TensorRef;
using TensorRefB = typename SharedStorageB::TensorRef;
struct SharedStorage {
/// Buffer for A operand
SharedStorageA operand_A;
/// Buffer for B operand
SharedStorageB operand_B;
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
CustomMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorageA& shared_storageA,
SharedStorageB& shared_storageB,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storageA.ref(), lane_idx),
warp_tile_iterator_B_(shared_storageB.ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/gemm/custom_mma_base.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/gemm/custom_mma_base.h",
"repo_id": "examples",
"token_count": 1908
} | 13 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from typing import List
import torch
import subprocess
import sys
import tempfile
import os
import numpy as np
TORCH_DTYPE_NAME = {
torch.float32: "f32",
torch.float16: "f16",
torch.bfloat16: "b16"
}
NAME_TORCH_DTYPE = {v: k for k, v in TORCH_DTYPE_NAME.items()}
def _tensor_from_storage(tensor: torch.Tensor, dtype) -> torch.Tensor:
# PyTorch >= 2.0
if hasattr(tensor, 'untyped_storage'):
return torch.tensor([], dtype=dtype).set_(tensor.untyped_storage())
return torch.tensor([], dtype=dtype).set_(tensor.storage().untyped())
class PipedSubprocess:
def __init__(self, binary: str) -> None:
self.binary = binary
self.tempdir_ctx = tempfile.TemporaryDirectory()
def __enter__(self) -> "PipedSubprocess":
self.subp = subprocess.Popen(self.binary, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr, text=True, bufsize=0)
self.tempdir = self.tempdir_ctx.__enter__()
self.file_counter = 0
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.tempdir_ctx.__exit__(exc_type, exc_val, exc_tb)
def temp_filename(self, suffix: str) -> str:
self.file_counter += 1
return os.path.join(self.tempdir, f"{self.file_counter}{suffix}")
def write(self, *args) -> None:
for a in args:
self.subp.stdin.write(str(a) + " ")
def writeTensor(self, tensor: torch.Tensor, name: str, stride_names: List[str]) -> None:
print(f"Py ->C++: {TORCH_DTYPE_NAME[tensor.dtype]}:{name}")
tensor_u8 = _tensor_from_storage(tensor, torch.uint8)
self.write("tensor_begin", f"{TORCH_DTYPE_NAME[tensor.dtype]}:{name}", tensor_u8.shape[0])
filename = self.temp_filename(f"{name}.tensor")
assert tensor.storage_offset() == 0
with open(filename, "wb+") as fd:
fd.write(bytes(tensor_u8.numpy()))
self.write("file", filename)
self.write("tensor_end")
for stride_name, stride_value in zip(stride_names, tensor.stride()):
self.write(stride_name, stride_value)
def readTensor(self, name, stride_name, shape) -> torch.Tensor:
tmpfile = self.temp_filename(f"{name}.tensor")
self.write("tmpfile", tmpfile)
self.readExpect("tensor_begin")
dtype_str, name = self.read().split(":")
print(f"C++->Py : {dtype_str}:{name}")
u8len = int(self.read())
dtype = NAME_TORCH_DTYPE[dtype_str]
self.readExpect("file")
self.readExpect(tmpfile)
with open(tmpfile, "rb") as fd:
data = fd.read(u8len)
# `np.array` is not strictly needed, but avoids a torch warning
tensor_u8 = torch.frombuffer(np.array(data), dtype=torch.uint8, count=u8len)
self.readExpect("tensor_end")
tensor = _tensor_from_storage(tensor_u8, dtype)
strides = []
for sn in stride_name:
self.readExpect(sn)
strides.append(int(self.read()))
        if len(strides) != len(shape):
strides.append(1)
assert len(strides) == len(shape), name
return torch.as_strided(tensor, shape, strides)
def readNamed(self, name: str):
self.readExpect(name)
return self.read()
def readExpect(self, what: str) -> None:
r = self.read()
if r != what:
raise ValueError(f"Read {r} but expected {what}")
def read(self):
read_all = []
# Skip initial whitespace
while True:
r = self.subp.stdout.read(1)
if r not in [' ', "\n"]:
read_all.append(r)
break
# Read data
while True:
r = self.subp.stdout.read(1)
if r in [' ', "\n"]:
break
read_all.append(r)
return ''.join(read_all)
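# Minimal usage sketch (illustrative only -- the binary path, tensor names, and
# stride names below are hypothetical and must match whatever the C++ side of the
# pipe expects; see the fused multi-head attention example for the real protocol):
#
#   if __name__ == "__main__":
#       q = torch.randn(2, 8, 64, 64, dtype=torch.float16)
#       with PipedSubprocess("./41_fused_multi_head_attention_backward") as subp:
#           subp.writeTensor(q, "query", ["q_strideB", "q_strideH", "q_strideM"])
#           subp.write("run")
#           out = subp.readTensor("output", ["o_strideB", "o_strideH", "o_strideM"], q.shape)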
| examples/41_fused_multi_head_attention/piped_subprocess.py/0 | {
"file_path": "examples/41_fused_multi_head_attention/piped_subprocess.py",
"repo_id": "examples",
"token_count": 2303
} | 14 |
<jupyter_start><jupyter_text>Example of using epilogue visitor in the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs with different epilogues through CUTLASS Epilogue Visitor.[](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/04_epilogue_visitor.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example, construct the input and output tensors that will be used in our example.<jupyter_code>import torch
import cutlass
from cutlass.epilogue import relu
from cutlass import Tensor as FakeTensor
from cutlass.utils.profiler import CUDAEventProfiler
# This controls whether the C++ GEMM declaration will be printed at each step. Set to `False` to
# omit this information.
print_module = True
# The Epilogue Visitor feature currently only works for SM80 and 90
from cutlass.backend.utils.device import device_cc
if device_cc() not in [80, 90]:
import sys
sys.exit()
m = 16384
n = m
k = 512
type_A = torch.float16
type_B = torch.float16
type_C = torch.float16
type_D = torch.float16
torch.manual_seed(2023)
scope_min = -4
scope_max = 4
tensor_A = torch.ceil(torch.empty(size=(m, k), dtype=type_A, device="cuda").uniform_(scope_min, scope_max))
tensor_B = torch.ceil(torch.empty(size=(k, n), dtype=type_B, device="cuda").uniform_(scope_min, scope_max))
tensor_C = torch.ceil(torch.empty(size=(m, n), dtype=type_C, device="cuda").uniform_(scope_min, scope_max))
tensor_D = torch.zeros_like(tensor_C)
plan = cutlass.op.Gemm(element=torch.float16, layout=cutlass.LayoutType.RowMajor, element_accumulator=torch.float32)<jupyter_output><empty_output><jupyter_text>Define the epilogue visitor functorThe epilogue functor can be defined as a simple Python function together with a set of example tensors for its inputs and outputs. The example below illustrates a complex epilogue with a directed-acyclic-graph structure (`F` is used twice). The epilogue takes source tensors of different ranks: `alpha` and `beta` are scalars, `bias` is a column vector to broadcast, and `C` and `aux` are matrices. It contains various math operations, from basic arithmetic operations to built-in callable functions like `relu`. It also accommodates multiple outputs, `D` and `F`. Note that there are some restrictions on syntax.* Each named variable must be assigned exactly once and defined before it is used.* Reserved names: `accum`, `C`, and `D` are reserved for the accumulator, tensor_C, and tensor_D.* Return values must be named variables.The example tensors argument is a dictionary with tensor names as keys and reference tensors as values. The reference tensors can be `float`, `torch.Tensor`, `numpy.ndarray`, or our `FakeTensor`. They provide the shape and data type information of the inputs and outputs of the epilogue.The epilogue can then be generated simply through `cutlass.epilogue.trace(example_epilogue, examples_tensors)`, as shown below.<jupyter_code># Define epilogue visitor
def example_epilogue(accum, alpha, C, beta, aux, bias):
F = alpha * accum + (beta * C + aux)
E = relu(F + 1) + bias
D = E + F
return D, F
# Construct inputs and outputs
alpha = 0.5
beta = 0.5
aux = torch.ceil(torch.empty(size=(m, n), dtype=type_C, device="cuda").uniform_(scope_min, scope_max))
bias = torch.ceil(torch.empty(size=(m, 1), dtype=type_C, device="cuda").uniform_(scope_min, scope_max))
tensor_F = torch.zeros_like(tensor_D)
examples_tensors = {
"accum": FakeTensor(element=torch.float32, shape=(m, n), layout_tag=cutlass.LayoutType.RowMajor),
"alpha": alpha,
"C": tensor_C,
"beta": beta,
"aux": aux,
"bias": bias,
"D": tensor_D,
"F": tensor_F
}
# Trace the epilogue visitor
epilogue_visitor = cutlass.epilogue.trace(example_epilogue, examples_tensors)<jupyter_output><empty_output><jupyter_text>Run a GEMM with the epilogue visitor functorThe `epilogue_visitor` can be used by setting the plan's `epilogue_visitor` field. The arguments for the epilogue visitor are provided as a `dict` through the `visitor_args` keyword argument.<jupyter_code>visitor_args = {
"alpha": alpha, "C": tensor_C, "beta": beta,
"aux": aux, "bias": bias, "D": tensor_D, "F": tensor_F
}
plan.epilogue_visitor = epilogue_visitor
plan.run(
tensor_A, tensor_B, tensor_C, tensor_D,
visitor_args=visitor_args, print_module=print_module)<jupyter_output><empty_output><jupyter_text>The epilogue function `example_epilogue` can be used as a reference function. We can now verify the results simply with<jupyter_code>class TorchReference(torch.nn.Module):
def forward(self, A, B, alpha, C, beta, aux, bias):
accum = torch.matmul(A, B)
return example_epilogue(accum, alpha, C, beta, aux, bias)
torch_reference = TorchReference()
tensor_D_ref, tensor_F_ref = torch_reference(tensor_A, tensor_B, alpha, tensor_C, beta, aux, bias)
assert torch.equal(tensor_D, tensor_D_ref)
assert torch.equal(tensor_F, tensor_F_ref)<jupyter_output><empty_output><jupyter_text>The performance of the CUTLASS fused kernel can be profiled with<jupyter_code>warmup_iterations = 10
profile_iterations = 50
# Profile CUTLASS fused kernel
duration = CUDAEventProfiler(
plan, warmup_iterations, profile_iterations,
tensor_A, tensor_B, tensor_C, tensor_D,
visitor_args=visitor_args)()
print(f"CUTLASS duration: {duration:.2f} ms")<jupyter_output><empty_output> | examples/python/04_epilogue_visitor.ipynb/0 | {
"file_path": "examples/python/04_epilogue_visitor.ipynb",
"repo_id": "examples",
"token_count": 1999
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/copy.hpp>
#include <cute/tensor.hpp>
namespace cute
{
/**
* concept Copy_Traits
* {
* using ThrID = // Logical thread id (tid) -> tidx
*
* using SrcLayout = // (Logical src thread id (tid), Logical src value id (vid)) -> bit
* using DstLayout = // (Logical dst thread id (tid), Logical dst value id (vid)) -> bit
* using RefLayout = // (Logical ref thread id (tid), Logical ref value id (vid)) -> bit
* };
*
* The abstract bit ordering of the Copy_Traits (the codomain of SrcLayout, DstLayout, and RefLayout)
* is arbitrary and only used to construct maps
* (ref-tid,ref-vid) -> (src-tid,src-vid)
* (ref-tid,ref-vid) -> (dst-tid,dst-vid)
* in TiledCopy. The Layout_TV in TiledCopy is in accordance with the RefLayout of a Traits, then mapped to
* the Src or Dst (tid,vid) representation on demand.
*
*/
template <class CopyOperation, class... CopyOpArgs>
struct Copy_Traits
{
static_assert(dependent_false<CopyOperation>, "Copy_Traits not implemented for this CopyOperation.");
};
template <class S, class D>
struct Copy_Traits<UniversalCopy<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
};
template <int MaxVecBits>
struct Copy_Traits<AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,_1>, Stride<_0,_0>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
};
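// Example (illustrative): these traits are normally consumed through a Copy_Atom
// (defined in copy_atom.hpp) rather than used directly. Tensor names below are
// hypothetical.
//
//   auto atom = Copy_Atom<UniversalCopy<float, float>, float>{};
//   copy(atom, src_tensor, dst_tensor);  // src/dst are cute::Tensor views of float
//
// TiledCopy uses RefLayout to remap its (thr,val) coordinates onto the SrcLayout
// and DstLayout declared by the traits.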
//
// Generic copy_unpack for common argument-based Copy_Traits
//
template <class CopyOp, class... Args,
class SEngine, class SLayout,
class DEngine, class DLayout>
CUTE_HOST_DEVICE constexpr
void
copy_unpack(Copy_Traits<CopyOp,Args...> const&,
Tensor<SEngine,SLayout> const& src,
Tensor<DEngine,DLayout> & dst)
{
// Specializations can generalize on these checks
//static_assert(is_smem<TS>::value, "Expected smem for this Copy_Traits<CopyOp>");
//static_assert(is_rmem<TD>::value, "Expected rmem for this Copy_Traits<CopyOp>");
using RegistersSrc = typename CopyOp::SRegisters;
using RegistersDst = typename CopyOp::DRegisters;
using RegTypeSrc = typename remove_extent<RegistersSrc>::type;
using RegTypeDst = typename remove_extent<RegistersDst>::type;
constexpr int RegNumSrc = extent<RegistersSrc>::value;
constexpr int RegNumDst = extent<RegistersDst>::value;
Tensor rS = recast<RegTypeSrc>(src);
Tensor rD = recast<RegTypeDst>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<RegNumSrc>{},
"Copy_Traits: src failed to vectorize into registers. Layout is incompatible with this CopyOp.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<RegNumDst>{},
"Copy_Traits: dst failed to vectorize into registers. Layout is incompatible with this CopyOp.");
detail::explode(detail::CallCOPY<CopyOp>{},
rS, make_int_sequence<RegNumSrc>{},
rD, make_int_sequence<RegNumDst>{});
}
//
// Accept mutable temporaries
//
template <class CopyOp, class... Args,
class SEngine, class SLayout,
class DEngine, class DLayout>
CUTE_HOST_DEVICE constexpr
void
copy_unpack(Copy_Traits<CopyOp,Args...> const& traits,
Tensor<SEngine,SLayout> const& src,
Tensor<DEngine,DLayout> && dst)
{
copy_unpack(traits, src, dst);
}
} // end namespace cute
| include/cute/atom/copy_traits.hpp/0 | {
"file_path": "include/cute/atom/copy_traits.hpp",
"repo_id": "include",
"token_count": 1984
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
# define CUTE_HOST_DEVICE __forceinline__ __host__ __device__
# define CUTE_DEVICE __forceinline__ __device__
# define CUTE_HOST __forceinline__ __host__
#else
# define CUTE_HOST_DEVICE inline
# define CUTE_DEVICE inline
# define CUTE_HOST inline
#endif // CUTE_HOST_DEVICE, CUTE_DEVICE
#if defined(__CUDACC_RTC__)
# define CUTE_HOST_RTC CUTE_HOST_DEVICE
#else
# define CUTE_HOST_RTC CUTE_HOST
#endif
#if !defined(__CUDACC_RTC__) && !defined(__clang__) && \
(defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA))
# define CUTE_UNROLL #pragma unroll
# define CUTE_NO_UNROLL #pragma unroll 1
#elif defined(__CUDACC_RTC__) || defined(__clang__)
# define CUTE_UNROLL _Pragma("unroll")
# define CUTE_NO_UNROLL _Pragma("unroll 1")
#else
# define CUTE_UNROLL
# define CUTE_NO_UNROLL
#endif // CUTE_UNROLL
#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
# define CUTE_INLINE_CONSTANT static const __device__
#else
# define CUTE_INLINE_CONSTANT static constexpr
#endif
// __grid_constant__ was introduced in CUDA 11.7.
#if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 7)))
# define CUTE_GRID_CONSTANT_SUPPORTED
#endif
// __grid_constant__ can be enabled only on SM70+.
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700))
# define CUTE_GRID_CONSTANT_ENABLED
#endif
#if ! defined(CUTE_GRID_CONSTANT)
# if defined(CUTE_GRID_CONSTANT_SUPPORTED) && defined(CUTE_GRID_CONSTANT_ENABLED)
# define CUTE_GRID_CONSTANT __grid_constant__
# else
# define CUTE_GRID_CONSTANT
# endif
#endif
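// Example usage (illustrative; the kernel and parameter names are hypothetical):
// annotate a kernel's const by-value parameter so it may be placed in
// grid-constant memory when the toolchain and architecture support it.
//
//   template <class Params>
//   __global__ void my_kernel(CUTE_GRID_CONSTANT Params const params);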
// Some versions of GCC < 11 have trouble deducing that a
// function with "auto" return type and all of its returns in an "if
// constexpr ... else" statement must actually return. Thus, GCC
// emits spurious "missing return statement" build warnings.
// Developers can suppress these warnings by using the
// CUTE_GCC_UNREACHABLE macro, which must be followed by a semicolon.
// It's harmless to use the macro for other GCC versions or other
// compilers, but it has no effect.
#if ! defined(CUTE_GCC_UNREACHABLE)
# if defined(__GNUC__)
# define CUTE_GCC_UNREACHABLE __builtin_unreachable()
# else
# define CUTE_GCC_UNREACHABLE
# endif
#endif
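// Example usage (illustrative): every branch of the "if constexpr" returns, yet
// some GCC versions still warn about a missing return; the trailing macro
// silences that warning.
//
//   template <bool B>
//   CUTE_HOST_DEVICE constexpr auto pick() {
//     if constexpr (B) { return 1;   }
//     else             { return 1.0; }
//     CUTE_GCC_UNREACHABLE;
//   }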
#if defined(_MSC_VER)
// Provides support for alternative operators 'and', 'or', and 'not'
# include <iso646.h>
#endif // _MSC_VER
#if defined(__CUDACC_RTC__)
# define CUTE_STL_NAMESPACE cuda::std
# define CUTE_STL_NAMESPACE_IS_CUDA_STD
#else
# define CUTE_STL_NAMESPACE std
#endif
//
// Assertion helpers
//
#if defined(__CUDACC_RTC__)
# include <cuda/std/cassert>
#else
# include <cassert>
#endif
#define CUTE_STATIC_V(x) decltype(x)::value
#define CUTE_STATIC_ASSERT static_assert
#define CUTE_STATIC_ASSERT_V(x,...) static_assert(decltype(x)::value, ##__VA_ARGS__)
// Fail and print a message. Typically used for notification of a compiler misconfiguration.
#if defined(__CUDA_ARCH__)
# define CUTE_INVALID_CONTROL_PATH(x) assert(0 && x); printf(x); __brkpt()
#else
# define CUTE_INVALID_CONTROL_PATH(x) assert(0 && x); printf(x)
#endif
//
// IO
//
#if !defined(__CUDACC_RTC__)
# include <cstdio>
# include <iostream>
# include <iomanip>
#endif
//
// Support
//
#include <cute/util/type_traits.hpp>
//
// Basic types
//
#include <cute/numeric/numeric_types.hpp>
//
// Debugging utilities
//
#include <cute/util/print.hpp>
#include <cute/util/debug.hpp>
| include/cute/config.hpp/0 | {
"file_path": "include/cute/config.hpp",
"repo_id": "include",
"token_count": 1930
} | 17 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/util/print.hpp"
#include "cute/util/type_traits.hpp"
#include "cute/numeric/math.hpp"
namespace cute
{
// A constant value: short name and type-deduction for fast compilation
template <auto v>
struct C {
using type = C<v>;
static constexpr auto value = v;
using value_type = decltype(v);
CUTE_HOST_DEVICE constexpr operator value_type() const noexcept { return value; }
CUTE_HOST_DEVICE constexpr value_type operator()() const noexcept { return value; }
};
// Deprecate
template <class T, T v>
using constant = C<v>;
template <bool b>
using bool_constant = C<b>;
using true_type = bool_constant<true>;
using false_type = bool_constant<false>;
// A more std:: conforming integral_constant that enforces type but interops with C<v>
template <class T, T v>
struct integral_constant : C<v> {
using type = integral_constant<T,v>;
static constexpr T value = v;
using value_type = T;
// Disambiguate C<v>::operator value_type()
//CUTE_HOST_DEVICE constexpr operator value_type() const noexcept { return value; }
CUTE_HOST_DEVICE constexpr value_type operator()() const noexcept { return value; }
};
//
// Traits
//
// Use cute::is_std_integral<T> to match built-in integral types (int, int64_t, unsigned, etc)
// Use cute::is_integral<T> to match both built-in integral types AND static integral types.
template <class T>
struct is_integral : bool_constant<is_std_integral<T>::value> {};
template <auto v>
struct is_integral<C<v> > : true_type {};
template <class T, T v>
struct is_integral<integral_constant<T,v>> : true_type {};
// is_static detects if an (abstract) value is defined completely by its type (no members)
template <class T>
struct is_static : bool_constant<is_empty<remove_cvref_t<T>>::value> {};
template <class T>
constexpr bool is_static_v = is_static<T>::value;
// is_constant<n,T> detects if T is a static integral type whose value is equal to n
template <auto n, class T>
struct is_constant : false_type {};
template <auto n, class T>
struct is_constant<n, T const > : is_constant<n,T> {};
template <auto n, class T>
struct is_constant<n, T const&> : is_constant<n,T> {};
template <auto n, class T>
struct is_constant<n, T &> : is_constant<n,T> {};
template <auto n, class T>
struct is_constant<n, T &&> : is_constant<n,T> {};
template <auto n, auto v>
struct is_constant<n, C<v> > : bool_constant<v == n> {};
template <auto n, class T, T v>
struct is_constant<n, integral_constant<T,v>> : bool_constant<v == n> {};
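// Examples (illustrative):
//   is_constant<1, C<1>>::value  // true
//   is_constant<1, C<2>>::value  // false
//   is_constant<1, int>::value   // false -- a runtime int is never a static constant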
//
// Specializations
//
template <int v>
using Int = C<v>;
using _m32 = Int<-32>;
using _m24 = Int<-24>;
using _m16 = Int<-16>;
using _m12 = Int<-12>;
using _m10 = Int<-10>;
using _m9 = Int<-9>;
using _m8 = Int<-8>;
using _m7 = Int<-7>;
using _m6 = Int<-6>;
using _m5 = Int<-5>;
using _m4 = Int<-4>;
using _m3 = Int<-3>;
using _m2 = Int<-2>;
using _m1 = Int<-1>;
using _0 = Int<0>;
using _1 = Int<1>;
using _2 = Int<2>;
using _3 = Int<3>;
using _4 = Int<4>;
using _5 = Int<5>;
using _6 = Int<6>;
using _7 = Int<7>;
using _8 = Int<8>;
using _9 = Int<9>;
using _10 = Int<10>;
using _12 = Int<12>;
using _16 = Int<16>;
using _24 = Int<24>;
using _32 = Int<32>;
using _64 = Int<64>;
using _96 = Int<96>;
using _128 = Int<128>;
using _192 = Int<192>;
using _256 = Int<256>;
using _384 = Int<384>;
using _512 = Int<512>;
using _768 = Int<768>;
using _1024 = Int<1024>;
using _2048 = Int<2048>;
using _4096 = Int<4096>;
using _8192 = Int<8192>;
using _16384 = Int<16384>;
using _32768 = Int<32768>;
using _65536 = Int<65536>;
using _131072 = Int<131072>;
using _262144 = Int<262144>;
using _524288 = Int<524288>;
/***************/
/** Operators **/
/***************/
#define CUTE_LEFT_UNARY_OP(OP) \
template <auto t> \
CUTE_HOST_DEVICE constexpr \
C<(OP t)> operator OP (C<t>) { \
return {}; \
}
#define CUTE_RIGHT_UNARY_OP(OP) \
template <auto t> \
CUTE_HOST_DEVICE constexpr \
C<(t OP)> operator OP (C<t>) { \
return {}; \
}
#define CUTE_BINARY_OP(OP) \
template <auto t, auto u> \
CUTE_HOST_DEVICE constexpr \
C<(t OP u)> operator OP (C<t>, C<u>) { \
return {}; \
}
CUTE_LEFT_UNARY_OP(+);
CUTE_LEFT_UNARY_OP(-);
CUTE_LEFT_UNARY_OP(~);
CUTE_LEFT_UNARY_OP(!);
CUTE_LEFT_UNARY_OP(*);
CUTE_BINARY_OP( +);
CUTE_BINARY_OP( -);
CUTE_BINARY_OP( *);
CUTE_BINARY_OP( /);
CUTE_BINARY_OP( %);
CUTE_BINARY_OP( &);
CUTE_BINARY_OP( |);
CUTE_BINARY_OP( ^);
CUTE_BINARY_OP(<<);
CUTE_BINARY_OP(>>);
CUTE_BINARY_OP(&&);
CUTE_BINARY_OP(||);
CUTE_BINARY_OP(==);
CUTE_BINARY_OP(!=);
CUTE_BINARY_OP( >);
CUTE_BINARY_OP( <);
CUTE_BINARY_OP(>=);
CUTE_BINARY_OP(<=);
#undef CUTE_BINARY_OP
#undef CUTE_LEFT_UNARY_OP
#undef CUTE_RIGHT_UNARY_OP
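// Examples (illustrative): arithmetic on static integers is itself static.
//   auto a = _3{} + _5{};  // C<8>
//   auto b = -_2{};        // C<-2>
//   auto c = _4{} * 3;     // plain int: C<4> converts to int, so the result is a runtime value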
//
// Mixed static-dynamic special cases
//
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value && t == 0)>
CUTE_HOST_DEVICE constexpr
C<0>
operator*(C<t>, U) {
return {};
}
template <class U, auto t,
__CUTE_REQUIRES(is_std_integral<U>::value && t == 0)>
CUTE_HOST_DEVICE constexpr
C<0>
operator*(U, C<t>) {
return {};
}
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value && t == 0)>
CUTE_HOST_DEVICE constexpr
C<0>
operator/(C<t>, U) {
return {};
}
template <class U, auto t,
__CUTE_REQUIRES(is_std_integral<U>::value && (t == 1 || t == -1))>
CUTE_HOST_DEVICE constexpr
C<0>
operator%(U, C<t>) {
return {};
}
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value && t == 0)>
CUTE_HOST_DEVICE constexpr
C<0>
operator%(C<t>, U) {
return {};
}
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value && t == 0)>
CUTE_HOST_DEVICE constexpr
C<0>
operator&(C<t>, U) {
return {};
}
template <class U, auto t,
__CUTE_REQUIRES(is_std_integral<U>::value && t == 0)>
CUTE_HOST_DEVICE constexpr
C<0>
operator&(U, C<t>) {
return {};
}
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value && !bool(t))>
CUTE_HOST_DEVICE constexpr
C<false>
operator&&(C<t>, U) {
return {};
}
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value && !bool(t))>
CUTE_HOST_DEVICE constexpr
C<false>
operator&&(U, C<t>) {
return {};
}
template <class U, auto t,
__CUTE_REQUIRES(is_std_integral<U>::value && bool(t))>
CUTE_HOST_DEVICE constexpr
C<true>
operator||(C<t>, U) {
return {};
}
template <class U, auto t,
__CUTE_REQUIRES(is_std_integral<U>::value && bool(t))>
CUTE_HOST_DEVICE constexpr
C<true>
operator||(U, C<t>) {
return {};
}
//
// Named functions from math.hpp
//
#define CUTE_NAMED_UNARY_FN(OP) \
template <auto t> \
CUTE_HOST_DEVICE constexpr \
C<OP(t)> OP (C<t>) { \
return {}; \
}
#define CUTE_NAMED_BINARY_FN(OP) \
template <auto t, auto u> \
CUTE_HOST_DEVICE constexpr \
C<OP(t,u)> OP (C<t>, C<u>) { \
return {}; \
} \
template <auto t, class U, \
__CUTE_REQUIRES(is_std_integral<U>::value)> \
CUTE_HOST_DEVICE constexpr \
auto OP (C<t>, U u) { \
return OP(t,u); \
} \
template <class T, auto u, \
__CUTE_REQUIRES(is_std_integral<T>::value)> \
CUTE_HOST_DEVICE constexpr \
auto OP (T t, C<u>) { \
return OP(t,u); \
}
CUTE_NAMED_UNARY_FN(abs);
CUTE_NAMED_UNARY_FN(signum);
CUTE_NAMED_UNARY_FN(has_single_bit);
CUTE_NAMED_BINARY_FN(max);
CUTE_NAMED_BINARY_FN(min);
CUTE_NAMED_BINARY_FN(shiftl);
CUTE_NAMED_BINARY_FN(shiftr);
CUTE_NAMED_BINARY_FN(gcd);
CUTE_NAMED_BINARY_FN(lcm);
#undef CUTE_NAMED_UNARY_FN
#undef CUTE_NAMED_BINARY_FN
//
// Other functions
//
template <auto t, auto u>
CUTE_HOST_DEVICE constexpr
C<t / u>
safe_div(C<t>, C<u>) {
static_assert(t % u == 0, "Static safe_div requires t % u == 0");
return {};
}
template <auto t, class U,
__CUTE_REQUIRES(is_std_integral<U>::value)>
CUTE_HOST_DEVICE constexpr
auto
safe_div(C<t>, U u) {
return t / u;
}
template <class T, auto u,
__CUTE_REQUIRES(is_std_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
safe_div(T t, C<u>) {
return t / u;
}
template <class TrueType, class FalseType>
CUTE_HOST_DEVICE constexpr
decltype(auto)
conditional_return(true_type, TrueType&& t, FalseType&&) {
return static_cast<TrueType&&>(t);
}
template <class TrueType, class FalseType>
CUTE_HOST_DEVICE constexpr
decltype(auto)
conditional_return(false_type, TrueType&&, FalseType&& f) {
return static_cast<FalseType&&>(f);
}
// TrueType and FalseType must have a common type
template <class TrueType, class FalseType>
CUTE_HOST_DEVICE constexpr
auto
conditional_return(bool b, TrueType const& t, FalseType const& f) {
return b ? t : f;
}
// TrueType and FalseType don't require a common type
template <bool b, class TrueType, class FalseType>
CUTE_HOST_DEVICE constexpr
auto
conditional_return(TrueType const& t, FalseType const& f) {
if constexpr (b) {
return t;
} else {
return f;
}
}
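// Example (illustrative): with a static (template) predicate the two alternatives
// may have different types; the selected argument is returned unchanged.
//   auto x = conditional_return<true >(Int<3>{}, 7.5);  // Int<3>
//   auto y = conditional_return<false>(Int<3>{}, 7.5);  // double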
template <class Trait>
CUTE_HOST_DEVICE constexpr
auto
static_value()
{
if constexpr (is_std_integral<decltype(Trait::value)>::value) {
return Int<Trait::value>{};
} else {
return Trait::value;
}
CUTE_GCC_UNREACHABLE;
}
//
// Display utilities
//
template <auto Value>
CUTE_HOST_DEVICE void print(C<Value>) {
printf("_");
::cute::print(Value);
}
#if !defined(__CUDACC_RTC__)
template <auto t>
CUTE_HOST std::ostream& operator<<(std::ostream& os, C<t> const&) {
return os << "_" << t;
}
#endif
namespace detail {
// parse_int_digits takes a variadic number of digits and converts them into an int
template <class... Ts>
constexpr uint64_t parse_int_digits(uint64_t result, int digit, Ts... digits)
{
if constexpr (sizeof...(Ts) == 0) {
return 10 * result + digit;
} else {
return parse_int_digits(10 * result + digit, digits...);
}
}
} // end namespace detail
// This user-defined literal operator allows cute::constant values to be written as literals. For example,
//
// auto var = 32_c;
//
// var has type cute::constant<int,32>.
//
template <char... digits>
constexpr cute::constant<int,detail::parse_int_digits(0, (digits - '0')...)> operator "" _c()
{
static_assert((('0' <= digits && digits <= '9') && ...),
"Expected 0 <= digit <= 9 for each digit of the integer.");
return {};
}
} // end namespace cute
| include/cute/numeric/integral_constant.hpp/0 | {
"file_path": "include/cute/numeric/integral_constant.hpp",
"repo_id": "include",
"token_count": 6699
} | 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
//
// CUDA compatible print and printf
//
namespace cute
{
CUTE_HOST_DEVICE
int
num_digits(int x)
{
return (x < 10 ? 1 :
(x < 100 ? 2 :
(x < 1000 ? 3 :
(x < 10000 ? 4 :
(x < 100000 ? 5 :
(x < 1000000 ? 6 :
(x < 10000000 ? 7 :
(x < 100000000 ? 8 :
(x < 1000000000 ? 9 :
10)))))))));
}
//
// print dispatcher
//
CUTE_HOST_DEVICE
void
print(char c) {
printf("%c", c);
}
CUTE_HOST_DEVICE
void
print(signed char a) {
printf("%d", static_cast<int>(a));
}
CUTE_HOST_DEVICE
void
print(unsigned char a) {
printf("%u", static_cast<unsigned int>(a));
}
CUTE_HOST_DEVICE
void
print(short a) {
printf("%hd", a);
}
CUTE_HOST_DEVICE
void
print(unsigned short a) {
printf("%hu", a);
}
CUTE_HOST_DEVICE
void
print(int a) {
printf("%d", a);
}
CUTE_HOST_DEVICE
void
print(unsigned int a) {
printf("%u", a);
}
CUTE_HOST_DEVICE
void
print(long a) {
printf("%ld", a);
}
CUTE_HOST_DEVICE
void
print(unsigned long a) {
printf("%lu", a);
}
CUTE_HOST_DEVICE
void
print(long long a) {
printf("%lld", a);
}
CUTE_HOST_DEVICE
void
print(unsigned long long a) {
printf("%llu", a);
}
CUTE_HOST_DEVICE
void
print(float a) {
printf("%f", a);
}
CUTE_HOST_DEVICE
void
print(double a) {
printf("%f", a);
}
template <class... T>
CUTE_HOST_DEVICE
void
print(char const* format, T const&... t) {
printf(format, t...);
}
CUTE_HOST_DEVICE
void
print(char const* format) {
printf("%s", format);
}
//
// pretty printing
//
template <class T>
CUTE_HOST_DEVICE void
pretty_print(T const& v) {
printf(" "); print(v);
}
CUTE_HOST_DEVICE void
pretty_print(bool const& v) {
printf("%*d", 3, int(v));
}
CUTE_HOST_DEVICE void
pretty_print(int32_t const& v) {
printf("%*d", 5, v);
}
CUTE_HOST_DEVICE void
pretty_print(uint32_t const& v) {
printf("%*d", 5, v);
}
CUTE_HOST_DEVICE void
pretty_print(int64_t const& v) {
printf("%*lld", 5, static_cast<long long>(v));
}
CUTE_HOST_DEVICE void
pretty_print(uint64_t const& v) {
printf("%*llu", 5, static_cast<unsigned long long>(v));
}
CUTE_HOST_DEVICE void
pretty_print(half_t const& v) {
printf("%*.2f", 8, float(v));
}
CUTE_HOST_DEVICE void
pretty_print(float const& v) {
printf("%*.2e", 10, v);
}
CUTE_HOST_DEVICE void
pretty_print(double const& v) {
printf("%*.3e", 11, v);
}
} // end namespace cute
| include/cute/util/print.hpp/0 | {
"file_path": "include/cute/util/print.hpp",
"repo_id": "include",
"token_count": 1621
} | 19 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Matrix multiply-accumulate specialized for SM89
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if (__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 4)
# define CUTLASS_ARCH_MMA_SM89_SUPPORTED 1
#endif
#if defined(CUTLASS_ARCH_MMA_SM89_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 890)
# define CUTLASS_ARCH_MMA_SM89_ENABLED
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Whether the Mma uses an SM89 staged accumulation policy
template <class Operator>
static constexpr bool is_sm89_staged_policy_v =
(
// ElementA must be FP8
platform::is_same<typename Operator::ElementA, cutlass::float_e4m3_t>::value ||
platform::is_same<typename Operator::ElementA, cutlass::float_e5m2_t>::value
) &&
(
// ElementB must be FP8
platform::is_same<typename Operator::ElementB, cutlass::float_e4m3_t>::value ||
platform::is_same<typename Operator::ElementB, cutlass::float_e5m2_t>::value
) &&
(
// The instruction shape must be 16x8x32
Operator::ArchMmaOperator::Shape::kM == 16 &&
Operator::ArchMmaOperator::Shape::kN == 8 &&
Operator::ArchMmaOperator::Shape::kK == 32
) &&
(
// The operator must be OpMultiplyAdd (default)
platform::is_same<typename Operator::MathOperator, OpMultiplyAdd>::value
);
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16832 - Float {E4M3, E5M2}, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F32 = fe4m3 * fe4m3 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
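// Example (illustrative): instantiating and invoking the warp-level FP8 MMA
// defined above. Fragment contents are assumed to already hold this thread's
// share of the operands, as produced by the surrounding warp-level iterators.
//
//   using Mma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 8, 32>, 32,
//       cutlass::float_e4m3_t, cutlass::layout::RowMajor,
//       cutlass::float_e4m3_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   Mma::FragmentA frag_A;  // 16 x e4m3 per thread
//   Mma::FragmentB frag_B;  //  8 x e4m3 per thread
//   Mma::FragmentC accum;   //  4 x f32  per thread
//   Mma{}(accum, frag_A, frag_B, accum);  // accum = A * B + accum via mma.sync.m16n8k32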
/// Matrix multiply-add operation - F32 = fe4m3 * fe5m2 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e4m3_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e4m3_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e5m2.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation - F32 = fe5m2 * fe4m3 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e4m3_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e4m3_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e5m2.e4m3.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Matrix multiply-add operation - F32 = fe5m2 * fe5m2 + F32
template <typename Operator_>
struct Mma<
gemm::GemmShape<16, 8, 32>,
32,
cutlass::float_e5m2_t,
layout::RowMajor,
cutlass::float_e5m2_t,
layout::ColumnMajor,
float,
layout::RowMajor,
Operator_> {
static_assert(platform::is_same<Operator_, OpMultiplyAdd>::value ||
platform::is_same<Operator_, OpMultiplyAddFastAccum>::value,
"Invalid operator for SM89 FP8 instruction");
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = cutlass::float_e5m2_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<ElementA, 16>;
using ElementB = cutlass::float_e5m2_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<ElementB, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = Operator_;
using ArchTag = arch::Sm89;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM89_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k32.row.col.f32.e5m2.e5m2.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/mma_sm89.h/0 | {
"file_path": "include/cutlass/arch/mma_sm89.h",
"repo_id": "include",
"token_count": 4621
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a proxy class for storing non-standard 16-bit floating point values with
    8 bits of exponent and 7 bits of mantissa.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
#include <cuda_bf16.h>
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Floating-point type with 8 bits of exponent and 7 bits of mantissa.
struct alignas(2) bfloat16_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Methods
//
/// Constructs from an unsigned short
CUTLASS_HOST_DEVICE
static bfloat16_t bitcast(uint16_t x) {
bfloat16_t h;
h.storage = x;
return h;
}
private:
struct from_32_bit_integer_t {};
static constexpr from_32_bit_integer_t from_32_bit_integer{};
template<class T>
CUTLASS_HOST_DEVICE
explicit bfloat16_t(from_32_bit_integer_t, T x) {
static_assert(cutlass::platform::is_integral<T>::value && sizeof(T) == 4, "Requires 32-bit integer");
float flt = static_cast<float>(x);
uint32_t bits;
#if defined(__CUDA_ARCH__)
bits = reinterpret_cast<uint32_t &>(flt);
#else
std::memcpy(&bits, &flt, sizeof(bits));
#endif
storage = uint16_t(bits >> 16);
}
public:
/// Default constructor
bfloat16_t() = default;
/// Floating-point conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(float x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
asm("cvt.rn.bf16.f32 %0, %1;\n" : "=h"(storage) : "f"(x));
#else
uint32_t bits;
#if defined(__CUDA_ARCH__)
bits = reinterpret_cast<uint32_t &>(x);
#else
std::memcpy(&bits, &x, sizeof(bits));
#endif
if ((bits & 0x7f800000) != 0x7f800000) {
bool mantissa_bit = ((bits & (1 << 16)) != 0);
bool round_bit = ((bits & (1 << 15)) != 0);
bool sticky_bit = ((bits & ((1 << 15) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) {
bits += uint32_t(1 << 16);
}
}
else if (bits & ~0xff800000) {
bits = 0x7fffffff;
}
storage = uint16_t((bits >> 16) & 0xffff);
#endif
}
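  // Illustrative rounding examples for the float conversion above
  // (round-to-nearest-even on the 16 discarded mantissa bits):
  //   float bits 0x3F80C000: round bit 1, sticky bits nonzero -> rounds up to 0x3F81
  //   float bits 0x3F808000: exact tie, kept mantissa LSB is 0 -> stays 0x3F80
  //   float bits 0x3F818000: exact tie, kept mantissa LSB is 1 -> rounds up to 0x3F82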
/// Floating-point conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(double x): bfloat16_t(float(x)) {
}
/// Integer conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(int x) : bfloat16_t(from_32_bit_integer, x) {}
CUTLASS_HOST_DEVICE
explicit bfloat16_t(uint32_t x) : bfloat16_t(from_32_bit_integer, x) {}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
unsigned bits = (unsigned(storage) << 16);
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const &>(bits);
#else
float flt;
std::memcpy(&flt, &bits, sizeof(flt));
return flt;
#endif
}
/// Converts to float
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(float(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (float(*this) != 0.0f);
}
/// Obtains raw bits
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((raw() & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((raw() >> 7) & 0x0ff);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 127;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(raw() & 0x7f);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::bfloat16_t const& h) {
return h.signbit();
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t abs(cutlass::bfloat16_t const& h) {
return cutlass::bfloat16_t::bitcast(h.raw() & 0x7fff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() == 0x0ff) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() != 0x0ff);
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t nan_bf16(const char*) {
// NVIDIA canonical NaN
return cutlass::bfloat16_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() == 0x0ff) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::bfloat16_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x0ff;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::bfloat16_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x0ff) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t sqrt(cutlass::bfloat16_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::bfloat16_t(sqrtf(float(h)));
#else
return cutlass::bfloat16_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
bfloat16_t copysign(bfloat16_t const& a, bfloat16_t const& b) {
uint16_t a_bits;
uint16_t b_bits;
#if defined(__CUDA_ARCH__)
a_bits = reinterpret_cast<uint16_t const &>(a);
b_bits = reinterpret_cast<uint16_t const &>(b);
#else
std::memcpy(&a_bits, &a, sizeof(a_bits));
std::memcpy(&b_bits, &b, sizeof(b_bits));
#endif
uint16_t a_mag = (a_bits & 0x7fff);
uint16_t b_sign = (b_bits & 0x8000);
uint16_t result = (a_mag | b_sign);
return bfloat16_t::bitcast(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace std {
#if !defined(__CUDACC_RTC__)
/// Numeric limits
template <>
struct numeric_limits<cutlass::bfloat16_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 7;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t min() { return cutlass::bfloat16_t::bitcast(0x01); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t lowest() { return cutlass::bfloat16_t::bitcast(0xff7f); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t max() { return cutlass::bfloat16_t::bitcast(0x7f7f); }
/// Returns the machine epsilon (difference between 1 and the least value greater than 1)
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t epsilon() { return cutlass::bfloat16_t::bitcast(0x1000); }
/// Returns the largest possible rounding error
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t round_error() { return cutlass::bfloat16_t(0.5f); }
/// Returns positive infinity
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t infinity() { return cutlass::bfloat16_t::bitcast(0x7f80); }
/// Returns a quiet NaN
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t quiet_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); }
/// Returns a signaling NaN
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t signaling_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); }
/// Returns the smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t denorm_min() { return cutlass::bfloat16_t::bitcast(0x1); }
};
#endif
} // namespace std
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
bfloat16_t operator+(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator-(bfloat16_t const& lhs) {
return bfloat16_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator-(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator*(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator/(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator+=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator-=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator*=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator/=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator++(bfloat16_t & lhs) {
float tmp(lhs);
++tmp;
lhs = bfloat16_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator--(bfloat16_t & lhs) {
float tmp(lhs);
--tmp;
lhs = bfloat16_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t operator++(bfloat16_t & lhs, int) {
bfloat16_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = bfloat16_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
bfloat16_t operator--(bfloat16_t & lhs, int) {
bfloat16_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = bfloat16_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t operator "" _bf16(long double x) {
return cutlass::bfloat16_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t operator "" _bf16(unsigned long long int x) {
return cutlass::bfloat16_t(int(x));
}
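//
// Illustrative usage sketch (not part of the original header). The function
// name and values below are hypothetical; they only exercise the literals,
// arithmetic operators, classification helpers, and numeric_limits
// specialization defined above.
//
// #include "cutlass/bfloat16.h"
// #include <iostream>
//
// void bfloat16_demo() {
//   cutlass::bfloat16_t a = 1.5_bf16;             // user-defined literal
//   cutlass::bfloat16_t b(0.25f);                 // explicit conversion from float
//   cutlass::bfloat16_t c = a * b + 2_bf16;       // arithmetic is performed in float
//   std::cout << float(c) << "\n";                // prints 2.375
//   std::cout << cutlass::signbit(-a) << "\n";    // prints 1
//   std::cout << std::numeric_limits<cutlass::bfloat16_t>::digits << "\n";  // prints 7
// }
//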
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/bfloat16.h/0 | {
"file_path": "include/cutlass/bfloat16.h",
"repo_id": "include",
"token_count": 5124
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
This file contains definitions and utility functions for describing convolution problem sizes in terms of
activation (NHWC), filter (KRSC), output (NPQK), padding (pad_h, pad_w), stride (stride_h, stride_w), and
dilation (dilation_h, dilation_w). Furthermore, it defines helper functions to map CUTLASS's implicit gemm
tensor extents, sizes, and data types to that of the convolution's extents, sizes, and data types.
* Mapping convolutions to Gemm computation *
Cutlass implements convolutions with the Implicit Gemm algorithm. This algorithm performs a gemm
(general matrix-matrix multiply) on the convolution tensors Activation, Filter, and Output.
The underlying gemm operation follows the standard gemm definition:
C = A * B + C
A and B are input matrices
C is source and output matrix
For the three convolutional operators (Fprop, Dgrad, Wgrad), ImplicitGemm matrices A, B, and C are mapped
to convolution tensors Activation, Filter and Output as described in the table below.
___________________________________________________________________________
ConvolutionalOperator | A | B | C
___________________________________________________________________________
| | | | |
| Fprop | Activation | Filter | Output |
| Dgrad | Output | Filter | Activation |
| Wgrad | Output | Activation | Filter |
___________________________________________________________________________
In the convolution codebase, DO NOT mix using (A, B, C) with (Activation, Filter, Output).
For example, it's confusing and error-prone to document a convolution class or function
as operating on "A, B, Output." Instead, use the mapping functions below,
and adhere to using either A, B, C or Activation, Filter, Output.
Map elements' data types (ImplicitGemm -> Conv): GemmToConvElementMap
Map elements' data types (Conv -> ImplicitGemm): ConvToGemmElementMap
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm_enumerated_types.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Convolutional operator
enum class Operator {
kFprop,
kDgrad,
kWgrad,
kDeconv
};
/// Distinguishes convolution from cross correlation
enum class Mode {
kCrossCorrelation,
kConvolution
};
/// Selects among several implementation variants trading off performance with simplicity
enum class IteratorAlgorithm {
kAnalytic, ///< functionally correct in all cases but lower performance
kOptimized, ///< optimized for R <= 32, S <= 32 and unity-stride dgrad
kFixedChannels, ///< Analytic algorithm optimized for fixed channel count (C == AccessSize)
kFewChannels, ///< Analytic algorithm optimized for few channels (C divisible by AccessSize)
kFixedStrideDilation ///< Optimized for fixed stride and dilation
};
/// Distinguishes among partial specializations that accelerate certain problems where convolution
/// stride is unit.
enum class StrideSupport {
kStrided, ///< arbitrary convolution stride
kUnity, ///< unit convolution stride
kFixed ///< fixed convolution stride
};
/// Identifies split-K mode
enum class SplitKMode {
kNone,
kSerial,
kParallel
};
/// Identifies group mode
enum class GroupMode {
kNone,
kSingleGroup, ///< One CTA calculates one group or less
kMultipleGroup, ///< One CTA calculates multiple groups
kDepthwise ///< One CTA calculates cta_n groups (problem_size.C == problem_size.K == problem_size.groups)
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a tensor
template <
int N = 1,
int H = 1,
int W = 1,
int C = 1
>
struct TensorNHWCShape {
static int const kN = N;
static int const kH = H;
static int const kW = W;
static int const kC = C;
static int const kHW = H * W;
static int const kNHW = N * kHW;
static int const kNHWC = N * H * W * C;
static int const kCount = kNHWC;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<4> toCoord() {
return make_Coord(kN, kH, kW, kC);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a conv2d stride, which controls how the filter convolves around the input volume
template <
/// Stride in horizontal direction
int u = 1,
/// Stride in vertical direction
int v = 1
>
struct Stride2D {
static int const kU = u;
static int const kV = v;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<2> toCoord() {
return make_Coord(kU, kV);
}
};
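//
// Illustrative usage sketch (not part of the original header). The extents
// below are hypothetical; they show how the compile-time shapes are consumed
// through their static members.
//
// using ActivationShape = cutlass::conv::TensorNHWCShape<1, 224, 224, 64>;
// using FilterStride    = cutlass::conv::Stride2D<2, 2>;
//
// static_assert(ActivationShape::kCount == 1 * 224 * 224 * 64, "element count");
// cutlass::Coord<4> extent = ActivationShape::toCoord();   // (1, 224, 224, 64)
// cutlass::Coord<2> stride = FilterStride::toCoord();      // (2, 2)
//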
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/convolution.h/0 | {
"file_path": "include/cutlass/conv/convolution.h",
"repo_id": "include",
"token_count": 2239
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template wraps the tile access iterator concept to load whole tiles from tensors in
memory used for implicit GEMM convolution.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileAccessIterator_>
class TileIterator {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
TileIterator(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params, problem_size, ptr, thread_idx, threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
tile_access_iterator_.set_iteration_index(index);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator operator++(int) {
TileIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[idx],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
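//
// Illustrative usage sketch (not part of the original header). The names
// ConcreteAccessIterator, params, problem_size, ptr, thread_idx, and
// gemm_k_iterations are hypothetical placeholders; the loop shows the typical
// mainloop pattern for this wrapper.
//
// using Iterator = cutlass::conv::threadblock::TileIterator<ConcreteAccessIterator>;
//
// Iterator iterator(params, problem_size, ptr, thread_idx);
// typename Iterator::Fragment fragment;
// fragment.clear();
//
// for (int k = 0; k < gemm_k_iterations; ++k) {
//   iterator.load(fragment);   // gather one threadblock tile into registers
//   ++iterator;                // advance to the next tile along GEMM-K
//   // ... feed `fragment` to the warp-level MMA ...
// }
//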
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Tile Iterator
template <typename TileAccessIterator_>
class TileIteratorStridedDgrad {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor (output gradient (Dy) OperandA ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(
params,
problem_size,
ptr,
thread_idx,
stride_h_divmod, stride_w_divmod,
start_r, start_s,
threadblock_offset) { }
/// Constructor (filter (w) OperandB ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params,
problem_size,
ptr,
thread_idx,
start_r, start_s,
threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad operator++(int) {
TileIteratorStridedDgrad self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c + s * ThreadMap::Iterations::kContiguous],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv2d_tile_iterator.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv2d_tile_iterator.h",
"repo_id": "include",
"token_count": 3649
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradOutputGradientTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_k_[ThreadMap::Iterations::kContiguous];
int offset_nzpq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize filter_k for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] = threadblock_offset.column() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] += Shape::kColumn * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the output gradient tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int nzpq = offset_nzpq_[iteration_strided_];
int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
int z = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
return TensorCoord(n, z, p, q, filter_k_[iteration_contiguous_]);
}
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() < problem_size_.Z &&
coord.h() < problem_size_.P &&
coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
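//
// Worked example (not part of the original header; the extents are hypothetical)
// of how at() decomposes the linear GEMM-K offset nzpq into (n, z, p, q).
//
// With Z = 4, P = 8, Q = 8 and nzpq = 300:
//   n        = 300 / (4 * 8 * 8) = 1
//   residual = 300 % (4 * 8 * 8) = 44
//   z        = 44 / (8 * 8)      = 0
//   residual = 44 % (8 * 8)      = 44
//   p        = 44 / 8            = 5
//   q        = 44 % 8            = 4
//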
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 2871
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
Convolution problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
static int get_strided_dgrad_tile_m(
cutlass::conv::Conv2dProblemSize const &problem_size,
int tile_size_m) {
// CTAs in M dimension per starting filter position
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, tile_size_m);
// Inflate number of CTAs in M dimension to cover every starting filter position, even those that
// may fall out of valid MMA (Dy * w) but are needed to apply epilogue (beta * Dx_source)
// and point-wise fusion
int tile_m = tile_m_per_filter * int(problem_size.stride().product());
// There is a possible performance optimization here that yields up to a 2x speedup over the current
// CUTLASS strided dgrad performance for stride > filter (i.e., stride={2x2} and filter={1x1})
//
// * Optimization *
// Only launch CTAs in M dimension which contribute to a row in Dx output
//
//
// * Constraints *
// (A) stride <= filter, for example, stride={2x2} and filter={3x3}:
// - (A.1): There are no constraints for this case, and the optimization does
// not affect this case's functionality or performance.
// (B) stride > filter, for example, stride={2x2} and filter={1x1}:
// - (B.1): Dx output tensor should be zero initialized
// - (B.2): The kernel epilogue cannot apply beta. Thus, beta should be zero
return tile_m;
}
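//
// Worked example (not part of the original header; the values are hypothetical):
// with stride = {2, 2} the stride product is 4, so there are 4 (start_r, start_s)
// starting filter positions. If strided_dgrad_tile_m_per_filter() yields 3 CTAs
// per starting position, the launched grid uses tile_m = 3 * 4 = 12 CTAs in the
// M dimension, one group of 3 for each starting position.
//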
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
struct StridedDgradHorizontalThreadblockSwizzle :
public gemm::threadblock::GemmHorizontalThreadblockSwizzle {
using Base = gemm::threadblock::GemmHorizontalThreadblockSwizzle;
CUTLASS_HOST_DEVICE
StridedDgradHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
static gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
// compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
template <int N = 1>
struct StridedDgradIdentityThreadblockSwizzle :
public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
using Base = gemm::threadblock::GemmIdentityThreadblockSwizzle<N>;
CUTLASS_HOST_DEVICE
StridedDgradIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
static gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
// compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1, int Output_N = 1, int Output_P = 1, int Output_Q = 1>
struct DepthwiseDirect2dConvIdentityThreadblockSwizzle
: public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvIdentityThreadblockSwizzle() {}
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
static gemm::GemmCoord get_tiled_shape(cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return gemm::GemmCoord(1,
(implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| include/cutlass/conv/threadblock/threadblock_swizzle.h/0 | {
"file_path": "include/cutlass/conv/threadblock/threadblock_swizzle.h",
"repo_id": "include",
"token_count": 2604
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This extends the contents of cutlass/functional.h with frequently used activation functions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/constants.h"
#include "cutlass/complex.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Identity operator
template <typename T>
struct Identity {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T value) const {
return value;
}
};
template <typename T, int N>
struct Identity<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> value) const {
return value;
}
};
/// Scale operator
template <typename T>
struct Scale {
struct Arguments {
using scale_type = T;
T scale = T(1);
};
CUTLASS_HOST_DEVICE
T operator()(T value, T scale) const {
multiplies<T> mul;
return mul(scale, value);
}
CUTLASS_HOST_DEVICE
T operator()(T value, Arguments args = Arguments()) const {
return this->operator()(value, args.scale);
}
};
template <typename T, int N>
struct Scale<Array<T, N>> {
using Arguments = typename Scale<T>::Arguments;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> values, T scale) const {
multiplies<Array<T, N>> mul;
return mul(scale, values);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> values, Arguments args = Arguments()) const {
return this->operator()(values, args.scale);
}
};
/// Specialization to compose other activations with a defined unary operator
/// e.g. Scale<Identity<T>>
template <template <class> class Activation, typename T>
struct Scale<Activation<T>> {
using Arguments = typename Scale<T>::Arguments;
CUTLASS_HOST_DEVICE
T operator()(T value, typename Arguments::scale_type scale) const {
multiplies<T> mul;
Activation<T> act;
return mul(scale, act(value));
}
CUTLASS_HOST_DEVICE
T operator()(T value, Arguments args = Arguments()) const {
return this->operator()(value, args.scale);
}
};
/// ReLu operator - propagates NaNs
/// Always put threshold in the right hand side of max to propagate NaN.
template <typename T>
struct ReLu {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T threshold, T value) const {
maximum<T> mx;
return mx(value, threshold);
}
CUTLASS_HOST_DEVICE
T operator()(T value) const {
maximum<T> mx;
return mx(value, T(0));
}
};
template <typename T>
using ReLU = ReLu<T>;
template <typename T, int N>
struct ReLu<Array<T, N>> {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const & threshold, Array<T, N> const &frag) const {
maximum<Array<T, N>> mx;
return mx(frag, threshold);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &frag) const {
maximum<Array<T, N>> mx;
return mx(frag, T(0));
}
};
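//
// Illustrative usage sketch (not part of the original header; the fragment
// width and values are hypothetical). Activation functors are applied
// element-wise to register fragments:
//
// cutlass::Array<float, 4> frag;
// frag[0] = -1.0f; frag[1] = 0.0f; frag[2] = 2.5f; frag[3] = -0.5f;
//
// cutlass::epilogue::thread::ReLu<cutlass::Array<float, 4>> relu;
// cutlass::Array<float, 4> result = relu(frag);   // {0, 0, 2.5, 0}
//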
// Generic clamp
template <typename T>
struct Clamp {
struct Arguments {
T lower_bound = CUTLASS_STL_NAMESPACE::numeric_limits<T>::lowest();
T upper_bound = CUTLASS_STL_NAMESPACE::numeric_limits<T>::max();
};
CUTLASS_HOST_DEVICE
T operator()(T const& value, T const& lower_bound, T const& upper_bound) const {
maximum<T> mx;
minimum<T> mn;
return mn(mx(value, lower_bound), upper_bound);
}
CUTLASS_HOST_DEVICE
T operator()(T const& value, Arguments const& args = Arguments()) const {
return this->operator()(value, args.lower_bound, args.upper_bound);
}
};
template <typename T, int N>
struct Clamp<Array<T,N>> {
using Arguments = typename Clamp<T>::Arguments;
CUTLASS_HOST_DEVICE
Array<T,N> operator()(Array<T,N> const& values, T const& lower_bound, T const& upper_bound) const {
maximum<Array<T,N>> mx;
minimum<Array<T,N>> mn;
return mn(mx(values, lower_bound), upper_bound);
}
CUTLASS_HOST_DEVICE
Array<T,N> operator()(Array<T,N> const& values, Arguments const& args = Arguments()) const {
return this->operator()(values, args.lower_bound, args.upper_bound);
}
};
// Leaky Relu operator
template <typename T>
struct LeakyReLU {
static const bool kIsHeavy = false;
struct Arguments {
T leaky_alpha = T(0);
};
CUTLASS_HOST_DEVICE
T operator()(T const& value, T const& leaky_alpha) const {
T res = value > T(0) ? value : value * leaky_alpha;
return res;
}
CUTLASS_HOST_DEVICE
T operator()(T const& value, Arguments const& args = Arguments()) const {
return this->operator()(value, args.leaky_alpha);
}
};
template <typename T, int N>
struct LeakyReLU<Array<T, N> > {
static const bool kIsHeavy = false;
using Arguments = typename LeakyReLU<T>::Arguments;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& values, T const& leaky_alpha) const {
Array<T, N> y;
LeakyReLU<T> leaky_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(values.size()); ++i) {
y[i] = leaky_op(values[i], leaky_alpha);
}
return y;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& values, Arguments const& args = Arguments()) const {
return this->operator()(values, args.leaky_alpha);
}
};
// Tanh operator
template <typename T>
struct Tanh {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
return fast_tanh(value);
}
};
template <typename T, int N>
struct Tanh<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
Tanh<T> tanh_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = tanh_op(value[i]);
}
return y;
}
};
template <int N>
struct Tanh<Array<half_t, N>> {
using T = half_t;
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& z) const {
fast_tanh_op<Array<T, N>> tanh;
return tanh(z);
}
};
// Sigmoid operator
template <typename T>
struct Sigmoid {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
return T(1) / (T(1) + fast_exp(-value));
}
};
template <typename T, int N>
struct Sigmoid<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
Sigmoid<T> sigmoid_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = sigmoid_op(value[i]);
}
return y;
}
};
template <int N>
struct Sigmoid<Array<half_t, N>> {
using T = half_t;
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& z) const {
plus<Array<T, N>> add;
#if defined(CUTLASS_USE_TANH_FOR_SIGMOID)
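// This branch relies on the identity sigmoid(z) == 0.5 * (tanh(z / 2) + 1),
// which lets the epilogue reuse the fast tanh path instead of evaluating exp().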
multiplies<Array<T, N>> mul;
fast_tanh_op<Array<T, N>> tanh;
return mul(add(tanh(mul(z, cutlass::constants::half<T>())), cutlass::constants::one<T>()),
cutlass::constants::half<T>());
#else
divides<Array<T, N>> div;
negate<Array<T, N>> neg;
fast_exp_op<Array<T, N>> fast_exp;
return div(cutlass::constants::one<T>(),
add(cutlass::constants::one<T>(),
fast_exp(neg(z))));
#endif
}
};
// SiLu (swish) operator introduced by Elfwing et al. in the following paper
// "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning" (2017)
// https://arxiv.org/pdf/1702.03118.pdf
// It is used in EfficientNet and YOLOv5, for example.
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html
template <typename T>
struct SiLu {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
Sigmoid<T> sigmoid;
return value * sigmoid(value);
}
};
template <typename T, int N>
struct SiLu<Array<T, N>> {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Sigmoid<Array<T, N>> sigmoid_op;
multiplies<Array<T, N>> mul;
return mul(value, sigmoid_op(value));
}
};
// Hardswish operator introduced by Howard et al. in the following paper
// "Searching for MobileNetV3" (2019)
// https://arxiv.org/pdf/1905.02244.pdf
// It is used in models based on MobilenetNetV3.
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html
template <typename T>
struct HardSwish {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T const &x) const {
minimum<T> mn;
maximum<T> mx;
T relu6 = mn(mx(x + T(3), T(0)), T(6));
return x * relu6 / T(6);
}
};
template <>
struct HardSwish<float> {
using T = float;
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T const &x) const {
minimum<T> mn;
maximum<T> mx;
T relu6 = mn(mx(x + T(3), T(0)), T(6));
return x * relu6 * 0.16666667f;
}
};
template <typename T, int N>
struct HardSwish<Array<T, N> > {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
HardSwish<T> hardswish_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = hardswish_op(value[i]);
}
return y;
}
};
template <int N>
struct HardSwish<Array<half_t, N> > {
using T = half_t;
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
minimum<Array<T, N> > mn;
maximum<Array<T, N> > mx;
multiplies<Array<T, N> > mul;
plus<Array<T, N> > add;
return mul(mul(mn(mx(add(value, T(3)), T(0)), T(6)), value), T(0.16666667f));
}
};
//
// GELU function definitions implemented as described by
// Hendrycks, D., and Gimpel, K. in
// "Gaussian Error Linear Units (GELUs)." (2020)
// https://arxiv.org/pdf/1606.08415.pdf
//
// Floating-point constants are Taylor coefficients described in the paper.
//
// GELU operator
template <typename T>
struct GELU {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
return T(cutlass::constants::half<T>() * value *
(cutlass::constants::one<T>() + (T)erff((float)(value * cutlass::constants::half_root_two<T>()))));
}
};
template <>
struct GELU<float> {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
float operator()(float const &value) const {
return cutlass::constants::half<float>() * value *
(cutlass::constants::one<float>() + erff(value * cutlass::constants::half_root_two<float>() ));
}
};
template <>
struct GELU<double> {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
double operator()(double const &value) const {
return cutlass::constants::half<double>() * value *
(cutlass::constants::one<double>() + erf( value * cutlass::constants::half_root_two<double>() ));
}
};
template <typename T, int N>
struct GELU<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
GELU<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(value[i]);
}
return y;
}
};
template <typename T>
using ScaledGELU = Scale<GELU<T>>;
// GELU operator implemented using the Taylor series approximation
template <typename T>
struct GELU_taylor {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &z) const {
T k0 = T(0.7978845608028654);
T k1 = T(0.044715);
return T(cutlass::constants::half<T>() * z *
(cutlass::constants::one<T>() + fast_tanh(k0 * z * (cutlass::constants::one<T>() + k1 * z * z))));
}
};
template <int N>
struct GELU_taylor<Array<half_t, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &z) const {
using T = half_t;
Array<half_t, N> y;
half_t k0 = half_t(0.7978845608028654);
half_t k1 = half_t(0.044715);
multiply_add<Array<half_t, N>> fma;
multiplies<Array<half_t, N>> mul;
plus<Array<half_t, N>> add;
fast_tanh_op<Array<half_t, N>> tanh;
Array<half_t, N> u = mul(mul(k0, z), fma(mul(k1, z), z, cutlass::constants::one<T>()));
y = mul(mul(z, cutlass::constants::half<T>()), add(cutlass::constants::one<T>(), tanh(u)));
return y;
}
};
template <typename T, int N>
struct GELU_taylor<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
GELU_taylor<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(value[i]);
}
return y;
}
};
template <typename T>
using ScaledGELU_taylor = Scale<GELU_taylor<T>>;
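//
// Illustrative usage sketch (not part of the original header; the fragment
// width and scalar values are hypothetical). GELU_taylor evaluates
// 0.5 * z * (1 + tanh(k0 * z * (1 + k1 * z * z))) element-wise, and
// ScaledGELU_taylor composes it with a scale factor via Scale<GELU_taylor<T>>:
//
// cutlass::Array<cutlass::half_t, 8> accum_fragment;   // assumed to hold accumulator values
// cutlass::epilogue::thread::GELU_taylor<cutlass::Array<cutlass::half_t, 8>> gelu;
// cutlass::Array<cutlass::half_t, 8> activated = gelu(accum_fragment);
//
// cutlass::epilogue::thread::ScaledGELU_taylor<float> scaled_gelu;
// float y = scaled_gelu(1.25f, /*scale=*/0.5f);
//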
/// Computes backwards pass for GELU operator assuming d_t is the layer gradient and
/// z is computed from the forward pass.
template <typename T>
struct dGELU {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &d_t, T const &z) const {
T k0 = T(0.7978845608028654);
T k1 = T(0.044715);
T k2 = T(0.1070322243);
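// Note: k2 == 3 * k0 * k1; it arises from differentiating the cubic term inside tanh.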
T tanh_out = fast_tanh(k0 * z * (1 + k1 * z * z));
T ff = constants::half<T>() * z * ((1 - tanh_out * tanh_out) * (k0 + k2 * z * z)) +
constants::half<T>() * (1 + tanh_out);
return ff * d_t;
}
};
template <typename T, int N>
struct dGELU<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &d_t, Array<T, N> const &z) const {
Array<T, N> y;
dGELU<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(d_t[i], z[i]);
}
return y;
}
};
template <typename T>
struct dReLU {
CUTLASS_HOST_DEVICE
T operator()(T d_t, bool d_relu) const {
return d_relu ? d_t : T(0);
}
template <typename U>
CUTLASS_HOST_DEVICE
T operator()(T d_t, U d_relu) const {
return operator()(d_t, static_cast<bool>(d_relu));
}
};
template <typename T, int N>
struct dReLU<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, bool const (&d_relu)[N]) const {
Array<T, N> y;
dReLU<T> relu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = relu_op(d_t[i], d_relu[i]);
}
return y;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, Array<uint1b_t, N> const& d_relu) const {
UnpackPredicates<N> unpack_op;
bool preds[N];
unpack_op(preds, d_relu);
return operator()(d_t, preds);
}
template <typename U>
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, Array<U, N> const& d_relu) const {
Array<T, N> y;
dReLU<T> relu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = relu_op(d_t[i], d_relu[i]);
}
return y;
}
};
/// Computes backwards pass for ReLU operator assuming d_t is the layer gradient and
/// z is computed from the forward pass.
template <typename T>
struct dReLU_Z {
CUTLASS_HOST_DEVICE
T operator()(T d_t, T z) const {
return z < 0 ? T(0) : d_t;
}
};
template <typename T, int N>
struct dReLU_Z<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, Array<T, N> const& z) const {
Array<T, N> y;
dReLU_Z<T> relu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = relu_op(d_t[i], z[i]);
}
return y;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/activation.h/0 | {
"file_path": "include/cutlass/epilogue/thread/activation.h",
"repo_id": "include",
"token_count": 7230
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination with a maximum operation used by epilogues.
*/
#pragma once
#include "cutlass/half.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Single source of truth for whether to unroll for `LinearCombinationRelu()`
constexpr bool LinearCombinationReluIsHeavy() {
return false;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
                                           ///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                           ///< but 64 or 32 is sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationRelu {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentScaleBias = Array<ElementCompute, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta = ElementCompute(0),
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr = nullptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
      // Set the threshold to an all-ones bit pattern (NaN for floating-point compute types)
      // so the ReLU becomes a no-op for every k partition except the last
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes per-channel linear scaling and bias : D = scale * accumulator + bias
/// Scale and Bias are from input Fragment
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentScaleBias const &scale,
FragmentScaleBias const &bias) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform per-channel scale and bias
FragmentCompute intermediate;
multiply_add<FragmentCompute> mul_add_accumulator;
if(Scale == ScaleType::OnlyAlphaPerChannelScaling)
intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias
else
intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias
ReLu<FragmentCompute> relu;
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
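// Illustrative usage sketch (added commentary, not part of the original header). The types and
// extents below are example choices only; the functor is normally instantiated by an epilogue
// rather than called directly.
//
//   using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t,   // ElementOutput
//       8,                 // Count: elements computed per operation
//       float,             // ElementAccumulator
//       float>;            // ElementCompute
//
//   EpilogueOp::Params params(/*alpha=*/1.0f, /*beta=*/0.0f, /*threshold=*/0.0f);
//   EpilogueOp op(params);
//
//   EpilogueOp::FragmentAccumulator accum;    // produced by the warp-level MMA
//   EpilogueOp::FragmentOutput      source;   // loaded from C; has no effect when beta == 0
//   EpilogueOp::FragmentOutput      d = op(accum, source);   // D = max(threshold, alpha * accum)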
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Special handling for int types
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
ScaleType::Kind Scale, ///< Control Alpha and Beta scaling
FloatRoundStyle Round
>
class LinearCombinationRelu <ElementOutput_, Count, int, float, Scale, Round> {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy();
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentScaleBias = Array<ElementCompute, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta = ElementCompute(0),
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr = nullptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
      // Set the threshold to an all-ones bit pattern (NaN for floating-point compute types)
      // so the ReLU becomes a no-op for every k partition except the last
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
/// Computes per-channel linear scaling and bias : D = scale * accumulator + bias
/// Scale and Bias are from input Fragment
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentScaleBias const &scale,
FragmentScaleBias const &bias) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform per-channel scale and bias
FragmentCompute intermediate;
multiply_add<FragmentCompute> mul_add_accumulator;
if(Scale == ScaleType::OnlyAlphaPerChannelScaling)
intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias
else
intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias
ReLu<FragmentCompute> relu;
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
};
#endif // Conditional guards to enable partial specialization for packed integers
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_relu.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_relu.h",
"repo_id": "include",
"token_count": 6857
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops on Volta.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueVoltaTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
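// Illustrative (hypothetical) instantiation sketch, added as commentary: `WarpMma` stands for a
// Volta warp-level tensor-op MMA type defined elsewhere (e.g. via the gemm::warp defaults), and
// the tile shape / access width are example values only. The nested `::Epilogue` alias is the
// type a threadblock-scoped GEMM plugs in after its mainloop.
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t, 8, float, float>;
//
//   using DefaultEpilogue = cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // threadblock tile
//       WarpMma,                                  // warp-level MMA operator (assumed defined)
//       /*PartitionsK=*/1,
//       OutputOp,
//       /*ElementsPerAccess=*/8>;
//
//   using Epilogue = DefaultEpilogue::Epilogue;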
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueVoltaTensorOpStridedDgrad {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
int Rank,
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueVoltaTensorOpAffineRankN {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h",
"repo_id": "include",
"token_count": 3440
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator for planar-complex output representations.
///
/// Note, as with most CUTLASS components for planar complex, the template arguments describe
/// the underlying real data type.
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
>
class EpiloguePlanarComplex {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = ArrayPlanarComplex<
typename WarpMmaOperator::FragmentC::Element,
WarpMmaOperator::FragmentC::kElements
>;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Shape of each warp-level operation
using WarpShape = typename WarpMmaOperator::Shape;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Shared memory allocation
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<
WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK,
WarpCount::kN * WarpTileIterator::Shape::kColumn
>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
Shape::kRow + Padding::kRow,
Shape::kColumn + Padding::kColumn
>;
static int const kImaginaryStride = StorageShape::kCount;
//
// Data members
//
AlignedBuffer<Element, kImaginaryStride * 2> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() {
return storage.data();
}
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(
storage.data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
private:
//
// Data members
//
SharedStorage &shared_storage_;
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpiloguePlanarComplex(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
shared_storage_(shared_storage),
shared_load_iterator_(shared_storage.reference(), thread_idx),
warp_tile_iterator_(shared_storage.reference(), lane_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
//
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator_.add_tile_offset(warp_offset);
}
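  // Worked example of the mapping above (added commentary, values illustrative): with
  // WarpCount = <2, 2, 1> and warp_idx = 3, warp_k = 3 / 4 = 0, warp_mn = 3 % 4 = 3,
  // warp_m = 3 % 2 = 1 and warp_n = 3 / 2 = 1, so this warp writes the lower-right
  // quadrant of the threadblock tile (warp_offset = {1, 1}).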
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
    OutputTileIterator destination_iterator_real,   ///< Tile iterator for destination (real part)
    OutputTileIterator destination_iterator_imag,   ///< Tile iterator for destination (imaginary part)
    AccumulatorTile const &accumulators,            ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator_real,        ///< Tile iterator for source tensor (real part)
    OutputTileIterator source_iterator_imag) {      ///< Tile iterator for source tensor (imaginary part)
typename OutputTileIterator::Fragment source_fragment_real;
typename OutputTileIterator::Fragment source_fragment_imag;
if (!output_op.is_source_needed()) {
source_iterator_real.clear_mask();
source_iterator_imag.clear_mask();
}
source_fragment_real.clear();
source_fragment_imag.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator_real(accumulators.real);
AccumulatorFragmentIterator accum_fragment_iterator_imag(accumulators.imag);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator_real.load(source_fragment_real);
source_iterator_imag.load(source_fragment_imag);
++source_iterator_real;
++source_iterator_imag;
//
// Convert and store fragment
//
__syncthreads();
typename AccumulatorFragmentIterator::Fragment accum_fragment_real;
typename AccumulatorFragmentIterator::Fragment accum_fragment_imag;
accum_fragment_iterator_real.load(accum_fragment_real);
accum_fragment_iterator_imag.load(accum_fragment_imag);
++accum_fragment_iterator_real;
++accum_fragment_iterator_imag;
this->warp_tile_iterator_.store(accum_fragment_real);
this->warp_tile_iterator_.store_with_pointer_offset(accum_fragment_imag, SharedStorage::kImaginaryStride);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment_real[kPartitionsK];
typename SharedLoadIterator::Fragment aligned_accum_fragment_imag[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment_real[0]);
shared_load_iterator_.load_with_pointer_offset(aligned_accum_fragment_imag[0], SharedStorage::kImaginaryStride);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
static_assert(kPartitionsK == 1, "Sliced-K not supported for planar complex at this time");
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment_real;
typename OutputTileIterator::Fragment output_fragment_imag;
apply_output_operator_(
output_fragment_real,
output_fragment_imag,
output_op,
aligned_accum_fragment_real[0],
aligned_accum_fragment_imag[0],
source_fragment_real,
source_fragment_imag);
//
// Store the final result
//
destination_iterator_real.store(output_fragment_real);
destination_iterator_imag.store(output_fragment_imag);
++destination_iterator_real;
++destination_iterator_imag;
}
}
private:
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &output_fragment_real,
typename OutputTileIterator::Fragment &output_fragment_imag,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment_real,
typename SharedLoadIterator::Fragment const &aligned_accum_fragment_imag,
typename OutputTileIterator::Fragment const &source_fragment_real,
typename OutputTileIterator::Fragment const &source_fragment_imag) {
OutputAccessType *output_frag_real_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment_real);
OutputAccessType *output_frag_imag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment_imag);
AccumulatorAccessType const *compute_frag_real_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_real);
AccumulatorAccessType const *compute_frag_imag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_imag);
OutputAccessType const *source_frag_real_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment_real);
OutputAccessType const *source_frag_imag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment_imag);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
auto result_fragment = output_op(
make_ArrayPlanarComplex(compute_frag_real_ptr[i], compute_frag_imag_ptr[i]),
make_ArrayPlanarComplex(source_frag_real_ptr[i], source_frag_imag_ptr[i])
);
output_frag_real_ptr[i] = result_fragment.real;
output_frag_imag_ptr[i] = result_fragment.imag;
}
}
};
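// Illustrative call sketch (added commentary, not part of the original header): the caller
// supplies separate real/imaginary tile iterators and a single planar-complex accumulator.
//
//   EpiloguePlanarComplex<...> epilogue(shared_storage, thread_idx, warp_idx, lane_idx);
//   epilogue(output_op,
//            destination_iterator_real, destination_iterator_imag,
//            accumulators,                       // AccumulatorTile with .real and .imag planes
//            source_iterator_real, source_iterator_imag);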
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_planar_complex.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_planar_complex.h",
"repo_id": "include",
"token_count": 4937
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
template<
typename TensorLayout_, ///! The original output tensor layout
typename OutputIteratorLayout_, ///! Layout used by epilogue output iterator
typename TensorRef_, ///! Input tensor to epilogue output iterator
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter {
using TensorLayout = TensorLayout_;
using OutputIteratorLayout = OutputIteratorLayout_;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
/// Wgrad stride idx for implicit gemm algorithm
// Conv2d row-major matrix (KxRSC)
// Conv3d row-major matrix (KxTRSC)
static int const kWgradStrideIdx =
platform::is_same<TensorLayout, layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradStrideIdx : 0);
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride(kTensorStrideIdx);
}
CUTLASS_HOST_DEVICE
static OutputTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
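// Illustrative example of the stride selection above (added commentary): for a Conv2d Wgrad
// in TensorNHWC layout, the epilogue views the filter-gradient tensor (K, R, S, C) as a
// row-major K x (R*S*C) matrix, so the iterator's leading dimension is ref.stride(2) == R*S*C;
// for Fprop/Dgrad the leading dimension is ref.stride(0), i.e. the channel count of the output
// tensor.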
template<
typename TensorRef_, ///! Input tensor to epilogue output iterator
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter<layout::TensorNHWC, layout::TensorNHWC, TensorRef_, conv::Operator::kFprop, ConvProblemSize_> {
using TensorLayout = layout::TensorNHWC;
using OutputIteratorLayout = layout::TensorNHWC;
using MappedLayout = layout::RowMajor;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using MappedTensorCoord = typename MappedLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = conv::Operator::kFprop;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static MappedTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
template<
typename TensorRef_, ///! Input tensor to epilogue output iterator
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter<layout::TensorNDHWC, layout::TensorNDHWC, TensorRef_, conv::Operator::kFprop, ConvProblemSize_> {
using TensorLayout = layout::TensorNDHWC;
using OutputIteratorLayout = layout::TensorNDHWC;
using MappedLayout = layout::RowMajor;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using MappedTensorCoord = typename MappedLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = conv::Operator::kFprop;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static MappedTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
template <
int InterleavedK,
typename TensorRef_,
conv::Operator ConvOperator,
typename ConvProblemSize_
>
struct ConvOutputIteratorParameter<
layout::TensorNCxHWx<InterleavedK>,
layout::TensorNCxHWx<InterleavedK>,
TensorRef_,
ConvOperator,
ConvProblemSize_>
{
using TensorLayout = typename layout::TensorNCxHWx<InterleavedK>;
using OutputIteratorLayout = typename layout::TensorNCxHWx<InterleavedK>;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static OutputTensorCoord extent(ConvProblemSize problem_size) {
return problem_size.output_extent();
}
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/threadblock/output_iterator_parameter.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/output_iterator_parameter.h",
"repo_id": "include",
"token_count": 2284
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/simt_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Fragment iterator for SIMT accumulator arrangements
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Layout, ///< target shared memory layout
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class FragmentIteratorSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename Operator_ , ///< matrix multiply operator (concept: arch::Mma)
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class FragmentIteratorSimt<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Layout = layout::RowMajor;
/// Policy for warp-level epilogue components
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
private:
/// Internal access type
using AccessType = Array<typename Operator::ElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorSimt(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorSimt &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorSimt &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
int accumulator_access_offset = index_ * Policy::kAccessesPerIteration + n;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
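// Illustrative usage sketch (added commentary, not part of the original header): an epilogue
// visits the accumulator tile one fragment per iteration and stores each fragment through a
// warp-scoped tile iterator, round-tripping through shared memory.
//
//   FragmentIteratorSimt<WarpShape, Operator, layout::RowMajor, MmaSimtPolicy> frag_it(accum);
//   for (int it = 0; it < decltype(frag_it)::kIterations; ++it, ++frag_it) {
//     typename decltype(frag_it)::Fragment frag;
//     frag_it.load(frag);
//     warp_tile_iterator.store(frag);
//   }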
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/fragment_iterator_simt.h/0 | {
"file_path": "include/cutlass/epilogue/warp/fragment_iterator_simt.h",
"repo_id": "include",
"token_count": 1642
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Define basic numeric operators
This is inspired by the Standard Library's <functional> header.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include <cuda_runtime.h>
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include <mma.h>
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
#ifdef _MSC_VER
// Provides support for alternate operators such as 'and', 'or', ...
#include <iso646.h>
#endif // _MSC_VER
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct absolute_value_op {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return abs(lhs);
}
};
template <>
struct absolute_value_op<float> {
CUTLASS_HOST_DEVICE
float operator()(float lhs) const { return fabs(lhs); }
};
template <typename T>
struct plus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs += rhs;
return lhs;
}
};
template <typename T>
struct minus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs -= rhs;
return lhs;
}
};
template <typename T>
struct multiplies {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs *= rhs;
return lhs;
}
};
template <typename T>
struct scale {
T const scaling_factor_;
CUTLASS_HOST_DEVICE
scale(float scaling_factor) : scaling_factor_(scaling_factor) {
}
T operator()(T const &rhs) const {
T result = rhs * scaling_factor_;
return result;
}
};
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
/// Partial specializations needed when __CUDA_NO_HALF2_OPERATORS__ is set
template<>
struct plus<__half2> {
CUTLASS_HOST_DEVICE
__half2 operator()(__half2 lhs, __half2 const &rhs) const {
return __hadd2(lhs, rhs);
}
};
template<>
struct minus<__half2> {
CUTLASS_HOST_DEVICE
__half2 operator()(__half2 lhs, __half2 const &rhs) const {
return __hsub2(lhs, rhs);
}
};
template<>
struct multiplies<__half2> {
CUTLASS_HOST_DEVICE
__half2 operator()(__half2 lhs, __half2 const &rhs) const {
return __hmul2(lhs, rhs);
}
};
/// Specializations needed when __CUDA_NO_HALF_OPERATORS__ is set
template<>
struct plus<__half> {
CUTLASS_HOST_DEVICE
__half operator()(__half lhs, __half const &rhs) const {
return __hadd(lhs, rhs);
}
};
template<>
struct minus<__half> {
CUTLASS_HOST_DEVICE
__half operator()(__half lhs, __half const &rhs) const {
return __hsub(lhs, rhs);
}
};
template<>
struct multiplies<__half> {
CUTLASS_HOST_DEVICE
__half operator()(__half lhs, __half const &rhs) const {
return __hmul(lhs, rhs);
}
};
#endif // defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
/// Squares with optional conversion
template <typename T, typename Output = T>
struct square {
CUTLASS_HOST_DEVICE
Output operator()(T lhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs);
return mul_op(y, y);
}
};
/// Returns the magnitude squared of an element.
template <typename T, typename Output = T>
struct magnitude_squared {
CUTLASS_HOST_DEVICE
Output operator()(T lhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs);
return mul_op(y, y);
}
};
/// Computes the square of a difference with optional conversion
template <typename T, typename Output = T>
struct square_difference {
CUTLASS_HOST_DEVICE
Output operator()(T lhs, T rhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs) - Output(rhs);
return mul_op(y, y);
}
};
/// Computes the square of a difference with optional conversion
template <typename T, typename Output = T>
struct magnitude_squared_difference {
CUTLASS_HOST_DEVICE
Output operator()(T lhs, T rhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs) - Output(rhs);
return mul_op(y, y);
}
};
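// Illustrative usage of the squared-difference functors (example types and values only): the
// Output template parameter allows the arithmetic to be carried out in a wider type than T.
//
//   cutlass::square_difference<cutlass::half_t, float> sqd_op;
//   float d = sqd_op(cutlass::half_t(3.0f), cutlass::half_t(1.0f));   // d == 4.0f
//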
// Computes the reciprocal square root
template <typename T>
struct inverse_square_root;
template <>
struct inverse_square_root<float> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs) const {
#if defined(__CUDA_ARCH__)
return rsqrtf(lhs);
#else
return 1.f / std::sqrt(lhs);
#endif
}
};
template <>
struct inverse_square_root<half_t> {
CUTLASS_HOST_DEVICE
half_t operator()(half_t const &lhs) const {
#if defined(__CUDA_ARCH__)
auto result = hrsqrt(reinterpret_cast<__half const &>(lhs));
return reinterpret_cast<half_t const &>(result);
#else
return half_t(1.f / std::sqrt(half_t::convert(lhs)));
#endif
}
};
/// Divides
template <typename T>
struct divides {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs /= rhs;
return lhs;
}
};
/// Approximate reciprocal (1 / x)
template <typename T>
struct reciprocal_approximate {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return divides<T>{}(T(1), lhs);
}
};
template <>
struct reciprocal_approximate <float> {
CUTLASS_HOST_DEVICE
float operator()(float lhs) const {
float ret;
ret = 1.0f / lhs;
return ret;
}
};
/// Negate
template <typename T>
struct negate {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return -lhs;
}
};
/// Greater equal
template <typename T>
struct greater_equal {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs >= rhs);
}
};
/// Greater
template <typename T>
struct greater {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs > rhs);
}
};
/// Less equal
template <typename T>
struct less_equal {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs <= rhs);
}
};
/// Less
template <typename T>
struct less {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs < rhs);
}
};
template <typename T, bool PropagateNaN = false>
struct maximum {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
return (lhs < rhs ? rhs : lhs);
}
};
// This is a subclass and not an alias
// in order to work around a known Clang issue,
// where a template template parameter with one template parameter
// does not match classes that take multiple template parameters
// but have defaults for all but the first.
template<typename T>
struct maximum_with_default_nan_propagation : public maximum<T>
{};
// Maximum with NaN propagation
// To propagate NaNs, the "max" of two elements must return a NaN if either element is a NaN
template <typename T>
struct maximum<T, true> {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
#if defined(__CUDA_ARCH__)
return lhs > rhs or isnan(lhs) ? lhs : rhs;
#else
return lhs > rhs or std::isnan(lhs) ? lhs : rhs;
#endif
}
};
template <>
struct maximum<float, false> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs, float const &rhs) const {
return fmaxf(lhs, rhs);
}
};
template <>
struct maximum<float, true> {
CUTLASS_HOST_DEVICE
float operator()(float const lhs, float const rhs) const {
float res;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
asm volatile("max.NaN.f32 %0, %1, %2;\n" : "=f"(res) : "f"(lhs), "f"(rhs));
#elif defined(__CUDA_ARCH__)
res = lhs > rhs or isnan(lhs) ? lhs : rhs;
#else
res = lhs > rhs or std::isnan(lhs) ? lhs : rhs;
#endif
return res;
}
};
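// Illustrative comparison of the two maximum flavors (example values only):
//
//   cutlass::maximum<float> max_op;               // uses fmaxf, which ignores a NaN operand
//   cutlass::maximum<float, true> max_nan_op;     // propagates NaN
//   float a = max_op(NAN, 1.0f);                  // a == 1.0f
//   float b = max_nan_op(NAN, 1.0f);              // b is NaN
//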
// This is a subclass and not an alias
// in order to work around a known Clang issue,
// where a template template parameter with one template parameter
// does not match classes that take multiple template parameters
// but have defaults for all but the first.
template <typename T>
struct maximum_with_nan_propagation : maximum<T, true>
{};
// This alias exists for backwards compatibility only.
// Please use the correctly spelled class template above.
template <typename T>
using maximum_with_nan_propogation = maximum_with_nan_propagation<T>;
template <typename T, bool PropagateNaN = false>
struct minimum {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
return (rhs < lhs ? rhs : lhs);
}
};
template <typename T>
struct minimum<T, true> {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
#if defined(__CUDA_ARCH__)
return lhs < rhs or isnan(lhs) ? lhs : rhs;
#else
return lhs < rhs or std::isnan(lhs) ? lhs : rhs;
#endif
}
};
template <>
struct minimum<float, false> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs, float const &rhs) const {
return fminf(lhs, rhs);
}
};
template <typename T, bool PropagateNaN = false>
struct maximum_absolute_value {
CUTLASS_HOST_DEVICE
float operator()(T const &lhs, T const &rhs) const {
absolute_value_op<T> abs_op;
maximum<T, PropagateNaN> max_op;
return max_op(abs_op(lhs), abs_op(rhs));
}
};
// assumes the left operand is already an absolute value
template <typename T, bool PropagateNaN = false>
struct maximum_absolute_value_reduction {
CUTLASS_HOST_DEVICE
float operator()(T const &lhs, T const &rhs) const {
absolute_value_op<T> abs_op;
maximum<T, PropagateNaN> max_op;
return max_op(lhs, abs_op(rhs));
}
};
/// Fused multiply-add
template <typename A, typename B = A, typename C = A>
struct multiply_add {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
return C(a) * C(b) + c;
}
};
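// Illustrative usage (example values only); square_and_plus below composes this functor:
//
//   cutlass::multiply_add<float> fma_op;
//   float d = fma_op(2.0f, 3.0f, 1.0f);   // d == 2 * 3 + 1 == 7
//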
template <typename T>
struct square_and_plus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
multiply_add<T> multiply_add_op;
return multiply_add_op(rhs, rhs, lhs);
}
};
// Fused multiply-add that takes exactly one template parameter.
// This is useful for working around a known Clang issue,
// where a template template parameter with one template parameter
// does not match classes that take multiple template parameters
// but have defaults for all but the first.
template <typename A>
struct homogeneous_multiply_add : public multiply_add<A, A, A>
{};
/// Fused multiply-add followed by a ReLU (clamps the result at zero)
template <typename A, typename B = A, typename C = A>
struct multiply_add_relu0 {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
maximum<C> mx;
return mx(C(a) * C(b) + c, C(0));
}
};
/// Fused bitwise-AND and add
template <typename T>
struct and_add {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b, T const &c) const {
return ((a & b) + c);
}
};
/// Fused bitwise-XOR and add
template <typename T>
struct xor_add {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b, T const &c) const {
return ((a ^ b) + c);
}
};
template <typename T>
struct conjugate {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return a;
}
};
template <typename T>
struct first {
CUTLASS_HOST_DEVICE
T operator()(T const & first, T const &...) const {
return first;
}
CUTLASS_HOST_DEVICE
T operator()(T const & first) const {
return first;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct logical_and {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return ((static_cast<bool>(a) && static_cast<bool>(b)) ? T(1) : T());
}
};
template <typename T>
struct logical_or {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return ((static_cast<bool>(a) || static_cast<bool>(b)) ? T(1) : T());
}
};
template <typename T>
struct logical_not {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return T(!(a));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct bit_and {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a & b;
}
};
template <typename T>
struct bit_or {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a | b;
}
};
template <typename T>
struct bit_not {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return ~a;
}
};
template <typename T>
struct bit_xor {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a ^ b;
}
};
//////////////////////////////////////////////////////////////////////////////////////////////////
/// Atomic reductions
template <typename T>
struct atomic_add
{
CUTLASS_DEVICE
void operator()(T *ptr, const T &data)
{
#if defined(__CUDA_ARCH__)
atomicAdd(ptr, data);
#endif
}
};
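// Illustrative device-side usage of atomic_add (a user-written kernel is assumed; it is not part
// of this header):
//
//   __global__ void reduce_into(float *out, float const *in, int n) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n) {
//       cutlass::atomic_add<float>{}(out, in[i]);
//     }
//   }
//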
template<>
struct atomic_add<double>
{
CUTLASS_DEVICE
void operator()(double *ptr, const double &data)
{
#if !defined(__CUDA_ARCH__)
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(data);
#elif (__CUDA_ARCH__ >= 600)
atomicAdd(ptr, data);
#else
// Use CAS loop
unsigned long long int* ptr_int = reinterpret_cast<unsigned long long int*>(ptr);
unsigned long long int old_int = *ptr_int;
unsigned long long int assumed_int;
do {
double update = data + __longlong_as_double(old_int);
assumed_int = old_int;
old_int = atomicCAS(ptr_int, assumed_int, __double_as_longlong(update));
} while (assumed_int != old_int);
#endif // (__CUDA_ARCH__ >= 600)
}
};
template<>
struct atomic_add<half2>
{
CUTLASS_DEVICE
void operator()(half2 *ptr, const half2 &data)
{
#if !defined(__CUDA_ARCH__) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600))
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(data);
#else
// Vector-2 atomic reduction requires .target sm_60 or higher
uint32_t word = reinterpret_cast<const uint32_t&>(data);
asm volatile ("red.gpu.global.add.noftz.f16x2 [%0], %1;\n" : : "l"(ptr), "r"(word));
#endif // (__CUDA_ARCH__ >= 600)
}
};
template <typename T>
using red [[deprecated("use atomic_add instead")]] = atomic_add<T>;
template <typename T>
struct atomic_maximum {
CUTLASS_DEVICE
T operator()(T *ptr, T value) const {
#if defined(__CUDA_ARCH__)
return atomicMax(ptr, value);
#else
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(value);
CUTLASS_NOT_IMPLEMENTED();
return 0;
#endif
}
};
template <>
struct atomic_maximum<float> {
CUTLASS_DEVICE
float operator()(float *ptr, float value) const {
#if defined(__CUDA_ARCH__)
return !signbit(value) ?
__int_as_float(atomicMax((int*)ptr, __float_as_int(value))) :
__uint_as_float(atomicMin((unsigned int*)ptr, __float_as_uint(value)));
#else
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(value);
CUTLASS_NOT_IMPLEMENTED();
return 0;
#endif
}
};
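// Note on the float specialization above: for non-negative values, IEEE-754 bit patterns are
// monotonic when reinterpreted as signed integers, so atomicMax on the int view is correct.
// For negative values the bit-pattern ordering is reversed, so atomicMin on the unsigned view
// is used instead.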
// is_atomic
template <class Fn>
struct is_atomic : platform::false_type {};
template <class T>
struct is_atomic<atomic_add<T>> : platform::true_type {};
template <class T>
struct is_atomic<atomic_maximum<T>> : platform::true_type {};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for nvcuda::wmma::fragment<Use, m, n, k, T, Layout>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
template<typename Use, int m, int n, int k, typename T, typename Layout>
struct plus<nvcuda::wmma::fragment<Use, m, n, k, T, Layout>>
{
using Fragment = nvcuda::wmma::fragment<Use, m, n, k, T, Layout>;
using ElementType = typename Fragment::element_type;
CUTLASS_HOST_DEVICE
Fragment operator()(Fragment const &lhs, Fragment const &rhs) const
{
Fragment result;
plus<ElementType> scalar_op;
ElementType *result_elts = reinterpret_cast<ElementType*>(&result);
const ElementType *lhs_elts = reinterpret_cast<const ElementType*>(&lhs);
const ElementType *rhs_elts = reinterpret_cast<const ElementType*>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Fragment::num_elements; i++) {
result_elts[i] = scalar_op(lhs_elts[i], rhs_elts[i]);
}
return result;
}
};
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/functional.h/0 | {
"file_path": "include/cutlass/functional.h",
"repo_id": "include",
"token_count": 6596
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
  \brief Base device-level interface for grouped GEMM kernels.
*/
#pragma once
#include <algorithm>
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/trace.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM Grouped
template <typename BaseKernel_>
class BaseGrouped {
public:
using BaseKernel = BaseKernel_;
using ElementA = typename BaseKernel::ElementA;
using LayoutA = typename BaseKernel::LayoutA;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
static ComplexTransform const kTransformA = BaseKernel::kTransformA;
static int const kAlignmentA = BaseKernel::kAlignmentA;
using ElementB = typename BaseKernel::ElementB;
using LayoutB = typename BaseKernel::LayoutB;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
static ComplexTransform const kTransformB = BaseKernel::kTransformB;
static int const kAlignmentB = BaseKernel::kAlignmentB;
using ElementC = typename BaseKernel::ElementC;
using LayoutC = typename BaseKernel::LayoutC;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
static int const kAlignmentC = BaseKernel::kAlignmentC;
using ElementAccumulator = typename BaseKernel::Mma::Policy::Operator::ElementC;
using EpilogueOutputOp = typename BaseKernel::EpilogueOutputOp;
using ThreadblockSwizzle = typename BaseKernel::ThreadblockSwizzle;
using Operator = typename BaseKernel::Operator;
using WarpMmaOperator = typename BaseKernel::Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename WarpMmaOperator::MathOperator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename BaseKernel::Mma::Shape;
using WarpShape = typename BaseKernel::WarpShape;
using InstructionShape = typename BaseKernel::InstructionShape;
static int const kStages = BaseKernel::Mma::kStages;
/// Argument structure
using Arguments = typename BaseKernel::Arguments;
using ProblemInfo = typename BaseKernel::ProblemVisitor::ProblemInfo;
protected:
/// Kernel parameters object
typename BaseKernel::Params params_;
private:
/// Get the number of tiles across all problems in a group
static int32_t group_tile_count(const cutlass::gemm::GemmCoord* problem_sizes_ptr, int problem_count) {
int32_t tiles = 0;
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem = problem_sizes_ptr[i];
BaseKernel::ProblemVisitor::possibly_transpose_problem(problem);
tiles += problem_tile_count(problem);
}
return tiles;
}
/// Copy from `data` to `workspace`
Status copy_to_workspace(void* workspace, void* data, size_t bytes) {
cudaError_t cuda_error = cudaMemcpy(workspace, data, bytes, cudaMemcpyHostToDevice);
if (cuda_error != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
cuda_error = cudaGetLastError();
CUTLASS_TRACE_HOST(
" cudaMemcpy() returned error "
<< cudaGetErrorString(cuda_error));
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Precomputes scheduling information for the grouped GEMM
Status precompute(Arguments const &args, int32_t tile_count, void* workspace) {
size_t workspace_bytes = get_workspace_size(args);
std::vector<uint8_t> host_workspace(workspace_bytes);
BaseKernel::ProblemVisitor::host_precompute(args.host_problem_sizes,
args.problem_count,
args.threadblock_count,
(void*)host_workspace.data());
return copy_to_workspace(workspace, host_workspace.data(), workspace_bytes);
}
/// Reorder `data` according to `indices`
template <typename T>
static void reorder_array(T* data, const std::vector<size_t>& indices) {
// For now, simply create a copy of the data and then copy over to the original.
std::vector<T> copy(indices.size());
for (size_t i = 0; i < indices.size(); ++i) {
copy.at(i) = data[indices[i]];
}
memcpy(data, copy.data(), indices.size() * sizeof(T));
}
public:
/// Constructs the GEMM.
BaseGrouped() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return BaseKernel::can_implement(args);
}
/// Get the number of tiles in a problem
static int32_t problem_tile_count(cutlass::gemm::GemmCoord const &problem) {
auto grid = BaseKernel::ProblemVisitor::grid_shape(problem);
return BaseKernel::ProblemVisitor::tile_count(grid);
}
/// Get the number of tiles across all problems in a group
static int32_t group_tile_count(Arguments const &args) {
if (args.host_problem_sizes == nullptr) {
CUTLASS_TRACE_HOST("Received nullptr for `args.host_problem_sizes");
return -1;
}
return group_tile_count(args.host_problem_sizes, args.problem_count);
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) {
return BaseKernel::ProblemVisitor::get_workspace_size(args.host_problem_sizes,
args.problem_count,
args.threadblock_count);
} else {
return 0;
}
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return dim3(args.threadblock_count, 1, 1);
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
CUTLASS_TRACE_HOST("BaseGrouped::maximum_active_blocks()");
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
CUTLASS_TRACE_HOST(" smem_size: " << smem_size << " bytes");
cudaError_t result;
if (smem_size > (48 << 10)) {
result = cudaFuncSetAttribute(Kernel<BaseKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(
" cudaFuncSetAttribute() returned error "
<< cudaGetErrorString(result));
return -1;
}
}
int max_active_blocks = -1;
result = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks,
Kernel<BaseKernel>,
BaseKernel::kThreadCount,
smem_size);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(
" cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error "
<< cudaGetErrorString(result));
return -1;
}
CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks);
return max_active_blocks;
}
/// Sorts each pointer passed in according to the indices that sort
/// `problem_sizes_ptr` in descending order of problem-K dimension.
static void sort_problems(int problem_count,
cutlass::gemm::GemmCoord* problem_sizes_ptr,
int64_t* lda_host_ptr,
int64_t* ldb_host_ptr,
int64_t* ldc_host_ptr,
int64_t* ldd_host_ptr,
int64_t* offset_A_ptr,
int64_t* offset_B_ptr,
int64_t* offset_C_ptr,
int64_t* offset_D_ptr)
{
std::vector<size_t> indices(problem_count);
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&problem_sizes_ptr](size_t i, size_t j) {
return problem_sizes_ptr[i].k() > problem_sizes_ptr[j].k();
});
reorder_array(problem_sizes_ptr, indices);
reorder_array(lda_host_ptr, indices);
reorder_array(ldb_host_ptr, indices);
reorder_array(ldc_host_ptr, indices);
reorder_array(ldd_host_ptr, indices);
reorder_array(offset_A_ptr, indices);
reorder_array(offset_B_ptr, indices);
reorder_array(offset_C_ptr, indices);
reorder_array(offset_D_ptr, indices);
}
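  // Illustrative host-side use of sort_problems() before constructing Arguments (the concrete
  // grouped GEMM type `GemmGrouped` and the host-side arrays are assumptions):
  //
  //   GemmGrouped::sort_problems(problem_count, problem_sizes.data(),
  //                              lda.data(), ldb.data(), ldc.data(), ldd.data(),
  //                              offset_A.data(), offset_B.data(), offset_C.data(), offset_D.data());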
/// Computes the number of threadblocks to launch for the grouped kernel
static int sufficient(const cutlass::gemm::GemmCoord* problem_sizes_ptr=nullptr,
int problem_count=0,
int available_sm_count=-1) {
// Determine the number of blocks that would be launched to fill up a single
// wave on the GPU with each SM having maximum occupancy.
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(" cudaGetDevice() returned error "
<< cudaGetErrorString(result));
return 0;
}
int multiprocessor_count;
result = cudaDeviceGetAttribute(&multiprocessor_count,
cudaDevAttrMultiProcessorCount, device_idx);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(
" cudaDeviceGetAttribute() returned error "
<< cudaGetErrorString(result));
return 0;
}
bool override_sm_count = (available_sm_count < 0 || available_sm_count > multiprocessor_count);
if (override_sm_count) {
available_sm_count = multiprocessor_count;
}
int max_active_blocks = maximum_active_blocks();
if (max_active_blocks <= 0) {
return 0;
}
int occupancy_based_block_count = available_sm_count * max_active_blocks;
if (problem_sizes_ptr == nullptr || problem_count == 0) {
return occupancy_based_block_count;
}
int total_tiles = group_tile_count(problem_sizes_ptr, problem_count);
// If the group contains a single problem, launching the exact number of
// threadblocks needed to cover the problem minimizes the work performed
// per threadblock in finding the next tile to compute. We return total_tiles
// unless the user has provided the SM count.
if (problem_count == 1 && override_sm_count) {
return total_tiles;
}
// Choose between the full wave of threadblocks and the tile count. If there
// are fewer tiles in the group than threadblocks in the full wave, only
// some threadblocks will be assigned tiles. Those threadblocks
// which are not assigned tiles still need to perform the work of iterating through
// problem sizes to determine that they have no work to do. This competes for cycles
// with those threadblocks that are assigned tiles to compute.
return std::min(total_tiles, occupancy_based_block_count);
}
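  // Illustrative: the value returned by sufficient() is typically used to populate the
  // threadblock_count field of the kernel's Arguments, e.g.
  //
  //   int threadblock_count = GemmGrouped::sufficient(problem_sizes_host.data(), problem_count);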
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
CUTLASS_TRACE_HOST("BaseGrouped::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Workspace
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) {
int32_t tile_count = group_tile_count(args);
Status status = precompute(args, tile_count, workspace);
if (status != Status::kSuccess) {
return status;
}
params_ = typename BaseKernel::Params(args, workspace, tile_count);
} else {
params_ = typename BaseKernel::Params(args, workspace);
}
// Specify shared memory capacity for kernel.
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<BaseKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) {
int32_t tile_count = group_tile_count(args);
Status status = precompute(args, tile_count, workspace);
if (status != Status::kSuccess) {
return status;
}
params_.update(args, workspace, tile_count);
} else {
params_.update(args, workspace);
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
//
// Configure grid and block dimensions
//
if (!params_.problem_visitor.problem_count) {
return Status::kSuccess;
}
dim3 grid(params_.threadblock_count, 1, 1);
dim3 block(BaseKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
//
// Launch kernel
//
// Launch
cutlass::Kernel<BaseKernel><<<grid, block, smem_size, stream>>>(params_);
//
// Query for errors
//
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" grid launch failed with error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Initializes and runs the kernel.
Status operator()(
Arguments const &args,
void *workspace,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
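// Illustrative host-side flow for a concrete grouped GEMM built on BaseGrouped (the type alias
// `GemmGrouped` and the argument construction are assumptions):
//
//   GemmGrouped gemm_op;
//   typename GemmGrouped::Arguments args = /* problem sizes, pointers, leading dimensions, ... */;
//   size_t workspace_bytes = GemmGrouped::get_workspace_size(args);
//   void *workspace = nullptr;
//   cudaMalloc(&workspace, workspace_bytes);
//   if (GemmGrouped::can_implement(args) == cutlass::Status::kSuccess &&
//       gemm_op.initialize(args, workspace) == cutlass::Status::kSuccess) {
//     gemm_op.run(stream);
//   }
//   cudaFree(workspace);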
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/base_grouped.h/0 | {
"file_path": "include/cutlass/gemm/device/base_grouped.h",
"repo_id": "include",
"token_count": 6242
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/gemm/threadblock/gemv.h"
#include "cutlass/gemm/threadblock/default_gemv_core.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the ThreadBlock tile - concept: gemm::GemmShape<>
typename ThreadBlockShape_,
/// Size of the per-thread shape - concept: gemm::GemmShape<>
typename ThreadShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C/D matrix
typename ElementCD_,
/// Layout of C/D matrix (concept: MatrixLayout)
typename LayoutCD_,
/// Data type of the accumulator
typename ElementAccumulator_ = ElementCD_>
struct DefaultGemv {
/// Shape of Threadblock-level matrix operation (concept: GemmShape)
using ThreadBlockShape = ThreadBlockShape_;
/// Shape of warp-level matrix operation (concept: GemmShape)
using ThreadShape = ThreadShape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulators
using ElementAccumulator = ElementAccumulator_;
/// Data type of accumulators (same as C/D)
using LayoutAccumulator = LayoutCD_;
/// Data type of input/output matrix C/D
using ElementCD = ElementCD_;
/// Layout of input/output matrix C/D
using LayoutCD = LayoutCD_;
// Define the core components
using Core = typename cutlass::gemm::threadblock::DefaultGemvCore<
ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB,
ElementAccumulator, LayoutAccumulator>;
// Define the threadblock-scoped gemv
using ThreadBlockGemv = cutlass::gemm::threadblock::Gemv<Core>;
// Iterator for multiplicand A
using IteratorA = typename ThreadBlockGemv::IteratorA;
// Iterator for multiplicand B
using IteratorB = typename ThreadBlockGemv::IteratorB;
/// Policy for the iterator that reads/writes C/D
using IteratorPolicyCD = typename platform::conditional<
platform::is_same<LayoutCD, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<ThreadBlockShape::kN, ThreadBlockShape::kM>, Core::kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, Core::kThreadsPerN, ThreadShape::kM>>::type;
/// Iterator that reads/writes C/D
using IteratorCD = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, ElementCD, LayoutCD, 0, IteratorPolicyCD>;
/// Fragment storage for C/D
using FragmentCD = typename IteratorCD::Fragment;
// Define the threadblock swizzle
using ThreadBlockSwizzle = cutlass::gemm::threadblock::GemvBatchedStridedThreadblockDefaultSwizzle;
};
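// Illustrative instantiation (shapes and types chosen for example purposes only):
//
//   using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
//     cutlass::gemm::GemmShape<1, 64, 4>,     // threadblock shape
//     cutlass::gemm::GemmShape<1, 4, 4>,      // per-thread shape
//     float, cutlass::layout::RowMajor,       // A
//     float, cutlass::layout::ColumnMajor,    // B
//     float, cutlass::layout::RowMajor>;      // C/D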
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/default_gemv.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_gemv.h",
"repo_id": "include",
"token_count": 1557
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined batched array GEMM kernel. Operands for each batch index are
           read through arrays of pointers. Split-K is not supported.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmArray {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::Element const * const * ptr_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::Element const * const * ptr_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Element const * const * ptr_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::Element * const * ptr_D;
int64_t stride_D;
typename OutputOp::Params epilogue;
int batch_count;
int gemm_k_iterations;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() :
swizzle_log_tile(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size_,
cutlass::gemm::GemmCoord const & grid_tiled_shape_,
typename Mma::IteratorA::Element const * const * ptr_A_,
typename Mma::IteratorA::Layout layout_A,
typename Mma::IteratorB::Element const * const * ptr_B_,
typename Mma::IteratorB::Layout layout_B,
typename Epilogue::OutputTileIterator::Element const * const * ptr_C_,
typename Epilogue::OutputTileIterator::Layout layout_C,
typename Epilogue::OutputTileIterator::Element * const * ptr_D_,
typename Epilogue::OutputTileIterator::Layout layout_D,
typename OutputOp::Params epilogue_,
int batch_count_
):
problem_size(problem_size_),
grid_tiled_shape(grid_tiled_shape_),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(layout_A),
ptr_A(ptr_A_),
params_B(layout_B),
ptr_B(ptr_B_),
params_C(layout_C),
ptr_C(ptr_C_),
params_D(layout_D),
ptr_D(ptr_D_),
epilogue(epilogue_),
batch_count(batch_count_),
gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) {
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmArray() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
    // Each CTA handles multiple batch indices to accommodate the limited range of the CUDA grid's Z dimension
for (int batch_idx = threadblock_swizzle.get_batch_idx();
batch_idx < params.batch_count;
batch_idx += gridDim.z) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
0
};
cutlass::MatrixCoord tb_offset_B{
0,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
const_cast<typename Mma::IteratorA::Element *>(params.ptr_A[batch_idx]),
params.problem_size.mk(),
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
const_cast<typename Mma::IteratorB::Element *>(params.ptr_B[batch_idx]),
params.problem_size.kn(),
thread_idx,
tb_offset_B);
//
// Main loop
//
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
OutputOp output_op(params.epilogue);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
      // Assumes an identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
const_cast<typename Epilogue::OutputTileIterator::Element *>(params.ptr_C[batch_idx]),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ptr_D[batch_idx],
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// run efficient epilogue
epilogue(output_op, iterator_D, accumulators, iterator_C);
}
}
};
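// Illustrative launch sketch (normally performed by a device-level wrapper; the kernel alias and
// parameter construction are assumptions):
//
//   using GemmArrayKernel = cutlass::gemm::kernel::GemmArray<Mma, Epilogue, ThreadblockSwizzle>;
//   dim3 grid = ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
//   dim3 block(GemmArrayKernel::kThreadCount, 1, 1);
//   int smem_size = int(sizeof(typename GemmArrayKernel::SharedStorage));
//   cutlass::Kernel<GemmArrayKernel><<<grid, block, smem_size, stream>>>(params);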
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/kernel/gemm_array.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_array.h",
"repo_id": "include",
"token_count": 3219
} | 35 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Gemm kernel with an epilogue defined under the epilogue visitor concept
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Gemm that compute the epilogue visitor functor
template <
typename Mma, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
class GemmWithEpilogueVisitor: GemmUniversal<Mma,Epilogue, ThreadblockSwizzle_> {
public:
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Base = GemmUniversal<Mma,Epilogue, ThreadblockSwizzle>;
using Base::Base;
using FusionCallbacks = typename Epilogue::FusionCallbacks;
using ElementA = typename Base::ElementA;
using LayoutA = typename Base::LayoutA;
using ElementB = typename Base::ElementB;
using LayoutB = typename Base::LayoutB;
using ElementC = typename Base::ElementC;
using LayoutC = typename Base::LayoutC;
using ThreadblockShape = typename Mma::Shape;
//
// Structures
//
using SharedStorage = typename Base::SharedStorage;
using Arguments = typename Base::Arguments;
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
cute::Shape<int32_t,int32_t,int32_t> problem_shape;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename FusionCallbacks::Params output_op;
void * ptr_A;
void * ptr_B;
int64_t batch_stride_A;
int64_t batch_stride_B;
int * ptr_gather_A_indices;
int * ptr_gather_B_indices;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)),
problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices))
{
// Raise error on unsupported modes
assert(args.mode != GemmUniversalMode::kGemmSplitKParallel && "Sm80 EVT does not support SplitKParallel.");
assert(!(args.mode == GemmUniversalMode::kGemm && this->grid_tiled_shape.k() > 1 )
&& "Sm80 EVT does not support SplitKSerial.");
assert(args.mode != GemmUniversalMode::kArray && "Sm80 EVT does not support Array Gemm.");
}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalwithVisitor::Params::update()");
// Update input pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
this->batch_stride_D = args.batch_stride_D;
ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/);
problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count);
}
};
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithEpilogueVisitor op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.ptr_gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.ptr_gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
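    // (Ceiling division: each mainloop iteration consumes one Mma::Shape::kK slice of the K range
    //  assigned to this threadblock, e.g. K = 1024 with kK = 32 gives 32 iterations.)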
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
Epilogue epilogue(
params.output_op,
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_universal_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_universal_with_visitor.h",
"repo_id": "include",
"token_count": 3758
} | 36 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
#include "cutlass/trace.h"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelPtrArrayTmaWarpSpecializedCooperative, typename CollectiveMainloop_::DispatchPolicy::Schedule>>
>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(rank(typename ProblemShape::UnderlyingProblemShape{}) == 3 or rank(typename ProblemShape::UnderlyingProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using UnderlyingStrideA = typename CollectiveMainloop::UnderlyingStrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using UnderlyingStrideB = typename CollectiveMainloop::UnderlyingStrideB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using Schedule = typename DispatchPolicy::Schedule;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using UnderlyingStrideC = typename CollectiveEpilogue::UnderlyingStrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using UnderlyingStrideD = typename CollectiveEpilogue::UnderlyingStrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
static_assert(cute::is_void_v<TileScheduler_>,
"Ptr-Array Cooperative and Grouped Gemm Cooperative kernel only supports the default scheduler.");
static constexpr bool IsGroupedGemmKernel = !cute::is_same_v<UnderlyingStrideA, StrideA>;
using TileScheduler = cute::conditional_t<IsGroupedGemmKernel,
typename detail::TileSchedulerSelector<
GroupScheduler, ArchTag,
TileShape, ClusterShape,
ProblemShape>::Scheduler,
typename detail::TileSchedulerSelector<
void, ArchTag, TileShape, ClusterShape>::Scheduler>;
using TileSchedulerArguments = typename TileScheduler::Arguments;
using TileSchedulerParams = typename TileScheduler::Params;
static constexpr uint32_t NumLoadWarpGroups = 1;
static constexpr uint32_t NumMmaWarpGroups = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup);
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
/// Register requirement for Load and Math WGs
static constexpr uint32_t LoadRegisterRequirement = 40;
static constexpr uint32_t MmaRegisterRequirement = 232;
// 1 stage ordered sequence between mainloop and epilogue producer load threads
using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>;
// Kernel level shared memory storage
struct SharedStorage {
struct TensorStorage : cute::aligned_struct<128> {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order;
} pipelines;
struct TensorMapStorage : cute::aligned_struct<128> {
using MainloopTensorMapStorage = typename CollectiveMainloop::TensorMapStorage;
alignas(128) MainloopTensorMapStorage mainloop;
} tensormaps;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerParams scheduler{};
void* workspace{nullptr};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
CUTLASS_TRACE_HOST("to_underlying_arguments():");
ProblemShape problem_shapes = args.problem_shape;
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count);
KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count};
// Calculate workspace pointers
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
void* scheduler_workspace = workspace_ptr;
workspace_offset += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
void* epilogue_workspace = workspace_ptr + workspace_offset;
workspace_offset += CollectiveEpilogue::get_workspace_size(problem_shapes, args.epilogue);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
void* mainloop_workspace = workspace_ptr + workspace_offset;
workspace_offset += CollectiveMainloop::get_workspace_size(problem_shapes, args.mainloop, args.hw_info.sm_count);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
// Precompute the number of epilogue subtiles and pass it to the tile scheduler; it is used by the
// separate-reduction scheme in the stream-K case. The default NumEpilogueSubTiles value of 1 means
// subtiles are not used, so separate reduction is not enabled.
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
TileSchedulerParams scheduler;
if constexpr (IsGroupedGemmKernel) {
scheduler = TileScheduler::to_underlying_arguments(
problem_shapes, TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles);
}
else {
scheduler = TileScheduler::to_underlying_arguments(
problem_shapes.get_host_problem_shape(), TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles);
}
return {
args.mode,
problem_shapes,
CollectiveMainloop::to_underlying_arguments(problem_shapes, args.mainloop, mainloop_workspace),
CollectiveEpilogue::to_underlying_arguments(problem_shapes, args.epilogue, epilogue_workspace),
hw_info,
scheduler,
workspace
};
}
CUTLASS_HOST_DEVICE static
bool
can_implement(Arguments const& args) {
bool implementable = true;
if constexpr (IsGroupedGemmKernel) {
// Group GEMM currently only supports rank-3 problem shapes
implementable &= (args.mode == GemmUniversalMode::kGrouped && rank(typename ProblemShape::UnderlyingProblemShape{}) == 3);
} else {
implementable &= (args.mode == GemmUniversalMode::kArray && rank(typename ProblemShape::UnderlyingProblemShape{}) == 4);
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements for Ptr Array Gemm or Grouped Gemm.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_size = 0;
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
workspace_size += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
workspace_size += CollectiveMainloop::get_workspace_size(args.problem_shape, args.mainloop, sm_count);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
static cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
status = TileScheduler::template initialize_workspace<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, workspace_ptr + workspace_offset, stream, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_offset += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = CollectiveEpilogue::initialize_workspace(args.problem_shape, args.epilogue, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
status = CollectiveMainloop::initialize_workspace(args.problem_shape, args.mainloop, workspace_ptr + workspace_offset, stream);
workspace_offset += CollectiveMainloop::get_workspace_size(args.problem_shape, args.mainloop, args.hw_info.sm_count);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
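// The three workspace methods above (to_underlying_arguments, get_workspace_size, initialize_workspace)
// agree on a single layout. Illustrative sketch only; the sizes are whatever the scheduler and the
// collectives report for the given arguments:
//
//   [ tile-scheduler workspace | epilogue workspace | mainloop workspace ]
//
// with each region's starting offset rounded up to MinWorkspaceAlignment via round_nearest().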
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
// Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently
TileSchedulerArguments args{};
if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) {
args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_;
}
args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM;
dim3 grid_shape;
if constexpr (IsGroupedGemmKernel) {
grid_shape = TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args);
}
else {
grid_shape = TileScheduler::get_grid_shape(params.problem_shape.get_host_problem_shape(), TileShape{}, ClusterShape{}, params.hw_info, args);
}
return grid_shape;
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
// Preconditions
static_assert(size(TiledMma{}) == 256, "Cooperative kernel must have TiledMMA operating using 256 threads.");
static_assert(size<0>(TileShape{}) >= 128,
"Cooperative kernel requires Tile Size to be greater than or equal to 128 along the M-dimension.");
static_assert(cute::rank(UnderlyingStrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(UnderlyingStrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(UnderlyingStrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(UnderlyingStrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
/* In the Cooperative kernel, Consumer0 and Consumer1 collaborate on the same tile */
enum class WarpGroupRole {
Producer = 0,
Consumer0 = 1,
Consumer1 = 2
};
enum class ProducerWarpRole {
Mainloop = 0,
Warp1 = 1,
Epilogue = 2,
Warp3 = 3
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int lane_idx = canonical_lane_idx();
int warp_idx = canonical_warp_idx_sync();
int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup;
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int mma_thread_idx = thread_idx % size(TiledMma{});
auto warp_group_role = WarpGroupRole(canonical_warp_group_idx());
auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group);
int lane_predicate = cute::elect_one_sync();
uint32_t block_rank_in_cluster = cute::block_rank_in_cluster();
// Note: Tma Descriptor Prefetch (from either const or param) is not applicable here
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0;
mainloop_pipeline_params.num_consumers = size(TiledMma{});
mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{});
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster();
epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp;
epi_load_pipeline_params.consumer_arv_count = size(TiledMma{});
epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes;
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
typename LoadWarpOrderBarrier::Params params_load_order_barrier;
params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 0 : 1;
params_load_order_barrier.group_size = NumThreadsPerWarp;
LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
auto cluster_wait_fn = [] () {
// We need this to guarantee that pipeline initialization is visible
// to all producer and consumer thread blocks in the cluster
if constexpr (size(ClusterShape{}) > 1) {
cute::cluster_arrive_relaxed();
return [] () { cute::cluster_wait(); };
}
else {
__syncthreads();
return [] () {}; // do nothing
}
} ();
// Get the appropriate blocks for this thread block -- potential for thread block locality
TiledMma tiled_mma;
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
TileScheduler scheduler{params.scheduler};
auto work_tile_info = scheduler.get_current_work();
if (not work_tile_info.is_valid()) {
return;
}
// Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{});
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue);
// Prepare and partition the input tensors. Expects a tuple of tensors where:
// get<0>(load_inputs) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l)
// get<1>(load_inputs) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l)
auto load_inputs = collective_mainloop.load_init(problem_shape_MNKL, params.mainloop);
static_assert(cute::tuple_size_v<decltype(load_inputs)> >= 2, "Output of load_init must have at least two elements (A, B)");
// Extract out partitioned A and B.
Tensor gA_mkl = get<0>(load_inputs);
Tensor gB_nkl = get<1>(load_inputs);
// Get pipeline stage increments from tensor shapes
auto k_tile_count = size<3>(gA_mkl);
// Wait for all thread blocks in the Cluster
cluster_wait_fn();
if (warp_group_role == WarpGroupRole::Producer) {
cutlass::arch::warpgroup_reg_dealloc<LoadRegisterRequirement>();
// Mainloop Producer Warp
if (producer_warp_role == ProducerWarpRole::Mainloop) {
int32_t curr_batch = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); // Usually just returns work_tile_info.L_idx;
int32_t next_batch = curr_batch;
int32_t const mock_l_coord = 0;
int32_t const sm_idx = blockIdx.x + (blockIdx.y * gridDim.x);
int32_t const sm_count = params.hw_info.sm_count;
// Fetch a copy of tensormaps for the CTA
auto input_tensormaps = collective_mainloop.tensormaps_init(params.mainloop, sm_count, sm_idx);
// Update tensormap for the initial batch for the CTA
if (work_tile_info.is_valid()) {
collective_mainloop.tensormaps_perform_update(
shared_storage.tensormaps.mainloop,
params.mainloop,
input_tensormaps,
problem_shape_MNKL,
next_batch
);
// Ensure warp is converged before issuing tensor replace
__syncwarp();
// Entire warp must do this (i.e., it is a warp-aligned operation)
collective_mainloop.tensormaps_cp_fence_release(shared_storage.tensormaps.mainloop, input_tensormaps);
}
bool do_load_order_arrive = true;
while (work_tile_info.is_valid()) {
if (!TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) {
work_tile_info = fetch_next_work(work_tile_info, scheduler);
continue;
}
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, mock_l_coord);
// Get the number of K tiles to compute for this work as well as the starting K tile offset of the work.
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info);
auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<3>(gA_mkl)), shape<3>(gA_mkl));
collective_mainloop.tensormaps_fence_acquire(input_tensormaps);
collective_mainloop.load(
params.mainloop,
mainloop_pipeline,
mainloop_pipe_producer_state,
load_inputs,
input_tensormaps,
blk_coord,
k_tile_iter, work_k_tile_count,
lane_idx,
block_rank_in_cluster,
shared_storage.tensors.mainloop
);
// Update starting pipeline state for the next tile
// Wait for the last TMA stage to complete loading, before issuing tensormap updates
mainloop_pipe_producer_state.advance(work_k_tile_count - 1);
// Signal for the epilogue load warp to begin
if (do_load_order_arrive) {
load_order_barrier.arrive();
do_load_order_arrive = false;
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
next_batch = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); // Usually just returns work_tile_info.L_idx
if (work_tile_info.is_valid() && next_batch != curr_batch ) {
if constexpr (IsGroupedGemmKernel) {
problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(next_batch), Int<1>{});
}
// Purpose of this pipeline state is to make sure TMA loads have finished before doing descriptor updates
// Since this state is waiting for loads to finish, it must start in the inverted phase.
typename CollectiveMainloop::PipelineState mainloop_pipe_tma_consumer_state =
{mainloop_pipe_producer_state.index(), !mainloop_pipe_producer_state.phase(), mainloop_pipe_producer_state.count()};
mainloop_pipeline.consumer_wait(mainloop_pipe_tma_consumer_state);
collective_mainloop.tensormaps_perform_update(
shared_storage.tensormaps.mainloop,
params.mainloop,
input_tensormaps,
problem_shape_MNKL,
next_batch
);
// Ensure warp is converged before issuing tensor replace
__syncwarp();
// Entire warp must do this (i.e., it is a warp-aligned operation)
collective_mainloop.tensormaps_cp_fence_release(shared_storage.tensormaps.mainloop, input_tensormaps);
curr_batch = next_batch;
}
// Advance the producer state for the last remaining stage that was being waited for above
mainloop_pipe_producer_state.advance(1);
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
} // Mainloop Producer Warp End
// Epilogue Producer Warp
else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) {
while (work_tile_info.is_valid()) {
if (!TileScheduler::requires_separate_reduction(params.scheduler)) {
load_order_barrier.wait();
}
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
epi_load_pipe_producer_state =
collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
lane_idx,
shared_storage.tensors.epilogue,
work_tile_info.reduction_subtile_idx()
);
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
if constexpr (IsGroupedGemmKernel) {
if (work_tile_info.is_valid()) {
problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{});
}
}
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
} // Epilogue Producer Warp End
} // Producer Warp Group End
else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
cutlass::arch::warpgroup_reg_alloc<MmaRegisterRequirement>();
// Whether we may need to issue tail arrivals for TMA stores, in case the epilogue load is waiting on them
bool do_store_tail = false;
while (work_tile_info.is_valid()) {
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
// Allocate the accumulators for the (M,N) blk_shape
//
// MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead.
auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
if(TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) {
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
work_k_tile_count,
mma_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
work_k_tile_count
);
// Update starting mainloop pipeline state for the next tile
mainloop_pipe_consumer_state.advance(work_k_tile_count);
}
// Index of warp group within consumer warp groups
int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups;
// Perform reduction across splits, if needed
TileScheduler::fixup(
params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
// Epilogue and write to gD
auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] =
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
mma_thread_idx,
shared_storage.tensors.epilogue,
work_tile_info.reduction_subtile_idx()
);
epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next;
epi_store_pipe_producer_state = epi_store_pipe_producer_state_next;
do_store_tail = true;
}
// Get next work tile
work_tile_info = fetch_next_work(work_tile_info, scheduler);
if constexpr (IsGroupedGemmKernel) {
if (work_tile_info.is_valid()) {
problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{});
}
}
} // Scheduler work fetch loop
if (do_store_tail) {
collective_epilogue.store_tail(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state
);
}
} // Consumer Warp Groups End
#endif
}
private:
// Kernel helper function to get next work unit
CUTLASS_DEVICE
typename TileScheduler::WorkTileInfo
fetch_next_work(
typename TileScheduler::WorkTileInfo& work_tile_info,
TileScheduler& scheduler) const {
// Check whether we should continue on with the current work unit. If this is the case,
// the work unit will have been updated in continue_current_work to reflect the new
// tile to be computed.
if (scheduler.continue_current_work(work_tile_info)) {
return work_tile_info;
}
// Get next work tile
scheduler.advance_to_next_work();
return scheduler.get_current_work();
}
};
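// Illustrative host-side sketch of composing this Ptr-Array / Grouped cooperative kernel. The
// problem-shape, collective, and adapter names below are assumptions chosen for the example
// (see the CUTLASS grouped-GEMM examples for the exact builder arguments), not definitions made
// by this header.
//
//   using ProblemShape = cutlass::gemm::GroupProblemShape<cute::Shape<int, int, int>>;
//   using GemmKernel   = cutlass::gemm::kernel::GemmUniversal<
//       ProblemShape, CollectiveMainloop, CollectiveEpilogue, void>;  // void -> default scheduler
//   using Gemm         = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
//
//   typename Gemm::Arguments args{cutlass::gemm::GemmUniversalMode::kGrouped,
//                                 problem_shape, mainloop_args, epilogue_args, hw_info};
//   Gemm gemm;
//   gemm.initialize(args, workspace_ptr);   // workspace sized via Gemm::get_workspace_size(args)
//   gemm.run(stream);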
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_cooperative.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_cooperative.hpp",
"repo_id": "include",
"token_count": 13400
} | 37 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
\brief Utilities for selecting default tile schedulers
*/
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm {
////////////////////////////////////////////////////////////////////////////////
//
// Tags for specifying tile schedulers
//
struct PersistentScheduler { };
struct StreamKScheduler { };
struct GroupScheduler { }; // Only used for Grouped GEMMs
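// These tags are what user code passes as the tile-scheduler template argument of a kernel (or of a
// CollectiveBuilder-based kernel declaration). Illustrative only; the surrounding problem-shape and
// collective types are assumed to be defined elsewhere:
//
//   using StreamKGemmKernel = cutlass::gemm::kernel::GemmUniversal<
//       ProblemShape, CollectiveMainloop, CollectiveEpilogue,
//       cutlass::gemm::StreamKScheduler>;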
////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm
////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel::detail {
//
// Selectors mapping tile scheduler tag and arch tag to a tile scheduler class
//
template <
class TileSchedulerTag,
class ArchTag,
class TileShape,
class ClusterShape
, class ProblemShapeType = void
>
struct TileSchedulerSelector {
static_assert(cutlass::detail::dependent_false<ArchTag>,
"Could not select a tile scheduler for given parameters.");
};
template <
class ArchTag,
class TileShape,
class ClusterShape
>
struct TileSchedulerSelector<
PersistentScheduler,
ArchTag,
TileShape,
ClusterShape
> {
using Scheduler = PersistentTileSchedulerSm90;
};
// Default (void) for Sm90 maps to PersistentTileSchedulerSm90
template <
class ArchTag,
class TileShape,
class ClusterShape
>
struct TileSchedulerSelector<
void,
ArchTag,
TileShape,
ClusterShape
> {
using Scheduler = typename TileSchedulerSelector<
PersistentScheduler,
ArchTag,
TileShape,
ClusterShape
>::Scheduler;
};
template <
class TileShape,
class ClusterShape
>
struct TileSchedulerSelector<
StreamKScheduler,
arch::Sm90,
TileShape,
ClusterShape
> {
using Scheduler = PersistentTileSchedulerSm90StreamK<TileShape, ClusterShape>;
};
template <
class TileShape,
class ClusterShape
, class GroupProblemShape
>
struct TileSchedulerSelector<
GroupScheduler,
arch::Sm90,
TileShape,
ClusterShape
, GroupProblemShape
> {
using Scheduler = PersistentTileSchedulerSm90Group<GroupProblemShape>;
};
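// Illustrative only: resolving a concrete scheduler type through the selector. The tile and cluster
// shapes below are assumptions chosen for the example, not requirements of this header.
//
//   using TileShape    = cute::Shape<cute::_128, cute::_256, cute::_64>;
//   using ClusterShape = cute::Shape<cute::_2, cute::_1, cute::_1>;
//   using Scheduler    = typename cutlass::gemm::kernel::detail::TileSchedulerSelector<
//       cutlass::gemm::StreamKScheduler, cutlass::arch::Sm90, TileShape, ClusterShape>::Scheduler;
//   // The GroupScheduler tag additionally requires a GroupProblemShape as the fifth argument.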
////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel::detail
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/tile_scheduler.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/tile_scheduler.hpp",
"repo_id": "include",
"token_count": 1192
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting simt instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a threadblock-scoped access
int kAccessSizeInBits = -1, // -1 denoting the default
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DefaultMmaCoreWithAccessSize;
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DefaultMmaCoreWithAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a threadblock-scoped access (a value of -1 indicates the default)
int kAccessSizeInBits_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCoreWithAccessSize<Shape_, WarpShape_, typename platform::enable_if<kAccessSizeInBits_ != -1, GemmShape<1, 1, 1>>::type, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, kAccessSizeInBits_, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccessDefault = 1;
static_assert(kAccessSizeInBits_ == -1 ||
sizeof_bits<ElementA>::value == sizeof_bits<ElementB>::value ||
kAccessSizeInBits_ / sizeof_bits<ElementA>::value == kElementsPerAccessDefault,
"Non-default value for kAccessSizeInBits_ is only allowed if size(elementA) == sizeof(elementB)");
static int const kElementsPerAccess = (kAccessSizeInBits_ != -1) ? kAccessSizeInBits_ / sizeof_bits<ElementA>::value : kElementsPerAccessDefault;
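// Worked example (element types assumed for illustration): with ElementA = float (32 bits) and
// kAccessSizeInBits_ = 128, kElementsPerAccess = 128 / 32 = 4; with the default
// kAccessSizeInBits_ = -1, kElementsPerAccess falls back to kElementsPerAccessDefault = 1.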
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// LaneM and LaneN are also capped by the thread tile dimensions
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the warp-level GEMM problem (concept: gemm::GemmShape<>)
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
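// Illustrative only: instantiating the SIMT core with an explicit access size. All tile shapes and
// element types below are assumptions chosen for the example.
//
//   using MmaCore = cutlass::gemm::threadblock::DefaultMmaCoreWithAccessSize<
//       cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,       // SIMT instruction shape
//       float, cutlass::layout::ColumnMajor,     // A
//       float, cutlass::layout::RowMajor,        // B
//       float, cutlass::layout::RowMajor,        // C
//       cutlass::arch::OpClassSimt,
//       64>;                                     // kAccessSizeInBits -> 2 floats per access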
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h",
"repo_id": "include",
"token_count": 4136
} | 39 |