#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for emitting Conv2d kernels
"""
import enum
import logging
import os.path
import shutil
from string import Template
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
from cutlass_library.conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes
except ImportError:
from library import *
from conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes
_LOGGER = logging.getLogger(__name__)
###################################################################################################
#
class Conv2dOperation:
#
def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \
stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity1, \
group_mode = GroupMode.NoneGroup):
self.operation_kind = OperationKind.Conv2d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
self.group_mode = group_mode
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_mixed_input(self):
return self.A.element != self.B.element
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
intermediate_type = ''
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.accumulator_type():
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ''
return "%s%s%s%s_%s" % (ShortDataTypeNames[self.accumulator_type()], \
inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = self.tile_description.procedural_name()
# grouped conv
if self.group_mode != GroupMode.NoneGroup:
group_conv_name = f"{GroupModeNames[self.group_mode]}_"
else:
group_conv_name = ""
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_${group_conv_name}align${alignment}"
else:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${group_conv_name}align${alignment}"
return SubstituteTemplate(
configuration_name,
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'alignment': "%d" % self.A.alignment,
'group_conv_name': group_conv_name
}
)
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.configuration_name()
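# Illustrative note (hypothetical example, not emitted by this class itself): the pieces above
# combine roughly as
#   cutlass_<opcode_class>_<extended_name>_<threadblock>_<layout>_[unity_stride_][<group>_]align<N>
# so a TensorOp fprop kernel might be named something like
# "cutlass_tensorop_..._fprop_optimized_..._nhwc_align8"; the exact string depends on the
# name tables imported from library.py.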
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv2dInstance:
def __init__(self):
# Emitter for CUTLASS 3 convolution operations
self.conv3x_emitter = EmitConv3xInstance()
self.template = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
"""
self.template_group_conv = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2dGroup${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${group_mode},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
"""
self.template_depthwise_direct_conv = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConv${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::conv::TensorNHWCShape<${threadblock_output_shape_n}, ${threadblock_output_shape_p}, ${threadblock_output_shape_q}, ${groups_per_cta}>,
cutlass::MatrixShape<${filter_shape_r}, ${filter_shape_s}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
${threadblock_output_shape_n},
${threadblock_output_shape_p},
${threadblock_output_shape_q}>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
cutlass::MatrixShape<${stride_r}, ${stride_s}>,
cutlass::MatrixShape<${dilation_r}, ${dilation_s}>
>::Kernel;
"""
def arch_number_to_type(self, arch: int):
return f"cutlass::arch::Sm{arch}"
def emit(self, operation):
_LOGGER.debug("*** EmitConv2dInstance::emit")
_LOGGER.debug("*** operation: procedural_name()=" + operation.procedural_name())
if hasattr(operation, 'is_3x') and operation.is_3x:
_LOGGER.debug("*** CUTLASS 3 operation")
return self.conv3x_emitter.emit(operation)
_LOGGER.debug("*** CUTLASS 2 operation")
warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm],
'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
'stride_support': StrideSupportTag[operation.stride_support],
'math_operator': 'cutlass::arch::OpMultiplyAddComplex' if operation.is_complex() else \
MathOperationTag[operation.tile_description.math_instruction.math_operation],
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
}
if operation.group_mode == GroupMode.NoneGroup:
_LOGGER.debug("*** group_mode=NoneGroup")
return SubstituteTemplate(self.template, values)
elif operation.group_mode == GroupMode.Depthwise:
_LOGGER.debug("*** group_mode=Depthwise")
values['group_mode'] = GroupModeTag[operation.group_mode]
# Setup other template params
values['threadblock_output_shape_n'] = str(operation.tile_description.threadblock_output_shape[0])
values['threadblock_output_shape_p'] = str(operation.tile_description.threadblock_output_shape[1])
values['threadblock_output_shape_q'] = str(operation.tile_description.threadblock_output_shape[2])
values['groups_per_cta'] = str(operation.tile_description.threadblock_output_shape[3])
values['filter_shape_r'] = str(operation.tile_description.filter_shape[0])
values['filter_shape_s'] = str(operation.tile_description.filter_shape[1])
values['stride_r'] = str(operation.tile_description.stride[0])
values['stride_s'] = str(operation.tile_description.stride[1])
values['dilation_r'] = str(operation.tile_description.dilation[0])
values['dilation_s'] = str(operation.tile_description.dilation[1])
return SubstituteTemplate(self.template_depthwise_direct_conv, values)
else:
_LOGGER.debug("*** group_mode=" + GroupModeTag[operation.group_mode])
values['group_mode'] = GroupModeTag[operation.group_mode]
return SubstituteTemplate(self.template_group_conv, values)
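# Usage sketch (assumes a fully populated Conv2dOperation `op` constructed elsewhere in the
# generator; shown here only as commented illustration):
#
#   emitter = EmitConv2dInstance()
#   instance_cpp = emitter.emit(op)   # returns the "using <name>_base = ...::Kernel;" C++ text
#
# The returned string is what EmitConv2dConfigurationLibrary writes into the generated .cu file.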
###################################################################################################
#
# Generator functions for all layouts
#
###################################################################################################
#
def GenerateConv2dTensorOp(manifest, tile_descriptions, min_cc, align = 128):
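# Note: `align` is expressed in bits (default 128, i.e. one 16-byte access); the per-operand
# alignments below are converted to elements via align / DataTypeSize[element].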
_LOGGER.debug("*** GenerateConv2dTensorOp")
for tile in tile_descriptions:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]):
#
output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \
if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \
else [tile.math_instruction.element_accumulator,]
for output_type in output_types:
A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_a]))
B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_b]))
C = TensorDescription(output_type, LayoutType.TensorNHWC, max(1, int(align / DataTypeSize[output_type])))
manifest.append(Conv2dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator))
class EmitConv2dIncludes:
'''Emit includes that are specific to the operation.'''
def __init__(self):
self.includes = ['conv2d_operation.h']
self.emitter_3x = EmitConv3xIncludes()
def operation_is_3x(self, operation) -> bool:
"""Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)"""
return hasattr(operation, 'is_3x') and operation.is_3x
def emit(self, operation) -> str:
if self.operation_is_3x(operation):
return self.emitter_3x.emit(operation)
return '\n'.join(f"#include \"{incl}\"" for incl in self.includes) + \
"\n\n///////////////////////////////////////////////////////////////////////////////////////////////////"
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitConv2dConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name)
self.instance_emitter = EmitConv2dInstance()
self.includes_emitter = EmitConv2dIncludes()
self.header_template = """
/*
Generated by conv2d_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
"""
self.instance_template = """
${stub_begin}
${operation_instance}
// Derived class
struct ${operation_name} :
public ${operation_name}_base { };
${stub_end}
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.configuration_header = """
namespace cutlass {
namespace library {
// Initialize all instances
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.configuration_instance = """${stub_begin}
using Operation_${operation_name} = cutlass::conv::device::${kernel_name}<
${operation_name}>;
manifest.append(new cutlass::library::${operation_wrapper}<
Operation_${operation_name}
>(
"${operation_name}"
));
${stub_end}
"""
self.configuration_epilogue = "}\n"
self.epilogue_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def operation_is_3x(self, operation):
"""Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)"""
return hasattr(operation, 'is_3x') and operation.is_3x
def __enter__(self):
"""
Open the configuration_file, and write the "header" C++ code to it.
The "header" consists of a comment (that this is generated code,
so it should not be edited), and includes that are common
to all kinds of kernels.
"""
_LOGGER.debug('*** EmitConv2dConfigurationLibrary::__enter__')
_LOGGER.debug('*** configuration_path (file to write): ' +
str(self.configuration_path))
_LOGGER.debug('*** configuration_name: ' + self.configuration_name)
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(SubstituteTemplate(self.header_template, {
'configuration_name': self.configuration_name
}))
self.operations = []
return self
def emit(self, operation):
"""
Write three pieces of C++ code to the configuration_file
(that was opened by the __enter__ method above):
1. the header includes that are specific to the operation
(CUTLASS 2 vs. CUTLASS 3);
2. the "operation instance" (a "using" declaration ending in "_base"); and
3. the "operation name" (declaration and definition of a derived class
of the above operation instance).
The "using" declaration turns a C++ class name, possibly namespace-qualified,
possibly also with angle brackets, into a C-style, easily demangled identifier.
"""
_LOGGER.debug('*** EmitConv2dConfigurationLibrary::emit')
_LOGGER.debug('*** operation.procedural_name(): ' + operation.procedural_name())
self.operations.append(operation)
self.configuration_file.write(self.includes_emitter.emit(operation))
stub_begin = ''
stub_end = ''
# It can be useful to stub (comment) out instantiations for testing.
# In this case, one need only set is_stub to True.
is_stub = False
if is_stub:
stub_begin = "// STUB for now\n#if 0"
stub_end = '#endif // 0'
self.configuration_file.write(Template(self.instance_template).substitute({
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'operation_instance': self.instance_emitter.emit(operation),
'stub_begin': stub_begin,
'stub_end': stub_end
}))
def __exit__(self, exception_type, exception_value, traceback):
"""
Write the rest of the C++ code to the configuration_file, and close the file.
The "rest of the C++ code" has the following components.
1. Configuration header: Open the namespace(s), and open the definition
of the "initialize_${configuration_name}" registration function
that registers the operation with the Manifest.
("Registration" helps turn C++ compile-time polymorphism
(via template parameters) into a run-time choice of parameters.)
2. Configuration instance: In the body of the registration function,
make a "using" declaration Operation_${operation_name} for the
operation type (which uses operation_name as its template argument).
Then, tell the manifest about the operation via a "manifest.append" call.
The argument of the call is a new instance of
"SomethingOperation<Operation_${operation_name}>"
(replace Something with a specific name).
3. Configuration epilogue: Close the definition of the registration function.
4. Epilogue template: Close the namespace(s).
"""
_LOGGER.debug('*** EmitConv2dConfigurationLibrary::__exit__')
_LOGGER.debug('*** configuration_path (file to write): ' +
str(self.configuration_path))
_LOGGER.debug('*** configuration_name: ' + self.configuration_name)
self.configuration_file.write(SubstituteTemplate(self.configuration_header, {
'configuration_name': self.configuration_name
}))
for operation in self.operations:
stub_begin = ''
stub_end = ''
# It can be useful to stub (comment) out instantiations for testing.
# In this case, one need only set is_stub to True.
is_stub = False
if is_stub:
stub_begin = "// STUB for now\n#if 0"
stub_end = "#endif // 0"
if operation.group_mode == GroupMode.Depthwise:
kernel_name = 'DirectConvolution'
operation_wrapper = 'DirectConv2dOperation'
else:
kernel_name = 'ImplicitGemmConvolution'
operation_wrapper = 'Conv2dOperation'
if self.operation_is_3x(operation):
kernel_name = 'ConvUniversalAdapter'
operation_wrapper = 'ConvOperation3x'
self.configuration_file.write(SubstituteTemplate(self.configuration_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'kernel_name': kernel_name,
'operation_wrapper': operation_wrapper,
'stub_begin': stub_begin,
'stub_end': stub_end
}))
self.configuration_file.write(self.configuration_epilogue)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
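# Usage sketch (hypothetical paths and operation list; mirrors how the manifest emitters drive
# this class):
#
#   with EmitConv2dConfigurationLibrary(operation_path, configuration_name) as emitter:
#     for op in operations:
#       emitter.emit(op)
#
# __enter__ writes the common header, each emit() call adds one kernel instantiation, and
# __exit__ writes the initialize_<configuration_name>() registration function and closes the file.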
###################################################################################################
###################################################################################################
# [end of source file: python/cutlass_library/conv2d_operation.py]
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Tests the high-level Conv2d interface
"""
from math import ceil
import unittest
import cutlass
import cutlass.utils.datatypes as datatypes
from cutlass.backend.utils.device import device_cc
from utils import ExpectException
import os
class Conv2dEquivalence:
"""
Helper class for testing the equivalence of different constructions of the Conv2d interface
"""
def __init__(self, conv_kind, element_A, element_B, element_C, element_D, element_accumulator,
alignment_A, alignment_B, alignment_C):
self.element_A = element_A
self.element_B = element_B
self.element_C = element_C
self.element_D = element_D
self.element_accumulator = element_accumulator
self.alignment_A = alignment_A
self.alignment_B = alignment_B
self.alignment_C = alignment_C
self.conv_kind = conv_kind
self.plan = cutlass.op.Conv2d(
kind=self.conv_kind, element_A=element_A, element_B=element_B, element_C=element_C,
element_D=element_D, element_accumulator=element_accumulator)
self.op = self.plan.construct(
alignment_A=self.alignment_A, alignment_B=self.alignment_B,
alignment_C=self.alignment_C)
def _plans_equal(self, other_plan) -> bool:
"""
Compares whether two plans are equal
:param other_plan: plan to compare against the default Conv2d
:type other_plan: cutlass.op.Conv2d
:return: whether `other_plan` is equivalent to `self.plan`
:rtype: bool
"""
other_op = other_plan.construct(
alignment_A=self.alignment_A, alignment_B=self.alignment_B,
alignment_C=self.alignment_C)
return self.op.rt_module.emit() == other_op.rt_module.emit()
def generic_test(self):
"""
Tests the equivalence of various constructions of the Conv2d interface when using CUTLASS data types
and layouts for constructing the Conv2d interface
"""
if not datatypes.is_numpy_available():
return
# Test when specifying all parameters
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator)
assert self._plans_equal(plan_other)
# Test when specifying all parameters but A
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
element=self.element_A)
assert self._plans_equal(plan_other)
# Test when specifying all parameters but A and B, using the generic element in their place
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
element=self.element_A)
assert self._plans_equal(plan_other)
# Test without explicit accumulator. Only run if the type of C and the accumulator are equal
if self.element_C == self.element_accumulator:
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_C=self.element_C,
element_D=self.element_D,
element=self.element_A)
assert self._plans_equal(plan_other)
# Test with only the generic types. Only run if the types of A, B, C, D, and the accumulator are the same
if (self.element_A == self.element_B and self.element_A == self.element_C and self.element_A == self.element_D
and self.element_A == self.element_accumulator):
plan_other = cutlass.op.Conv2d(kind=self.conv_kind, element=self.element_A)
assert self._plans_equal(plan_other)
def numpy_test(self):
"""
Tests the equivalence of various constructions of the Conv2d interface when using numpy as a frontend
"""
if not datatypes.is_numpy_available():
return
import numpy as np
type_A = datatypes.numpy_type(self.element_A)
type_B = datatypes.numpy_type(self.element_B)
type_C = datatypes.numpy_type(self.element_C)
type_D = datatypes.numpy_type(self.element_D)
type_accum = datatypes.numpy_type(self.element_accumulator)
size = (2, 2)
A = np.zeros(size, dtype=type_A)
B = np.zeros(size, dtype=type_B)
C = np.zeros(size, dtype=type_C)
D = np.zeros(size, dtype=type_D)
return self.tensor_test(type_A, type_B, type_C, type_D, type_accum, A, B, C, D)
def torch_test(self):
"""
Tests the equivalence of various constructions of the Conv2d interface when using torch as a frontend
"""
if not datatypes.is_torch_available():
return
import torch
type_A = datatypes.torch_type(self.element_A)
type_B = datatypes.torch_type(self.element_B)
type_C = datatypes.torch_type(self.element_C)
type_D = datatypes.torch_type(self.element_D)
type_accum = datatypes.torch_type(self.element_accumulator)
size = (2, 2)
A = torch.empty(size, dtype=type_A)
B = torch.empty(size, dtype=type_B)
C = torch.empty(size, dtype=type_C)
D = torch.empty(size, dtype=type_D)
return self.tensor_test(type_A, type_B, type_C, type_D, type_accum, A, B, C, D)
def tensor_test(self, type_A, type_B, type_C, type_D, type_accum, A, B, C, D):
# Test when specifying all parameters via tensors
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, A=A, B=B, C=C, D=D, element_accumulator=type_accum)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A as tensors
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, B=B, C=C, D=D, element_accumulator=type_accum, element_A=type_A)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A and B as tensors and using generic element and output
if type_A == type_B:
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, C=C, D=D, element_accumulator=type_accum, element=type_A)
assert self._plans_equal(plan_np)
# Test without explicit accumulator. Only run if the types of C and the accumulator are the same.
if type_C == type_accum:
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, A=A, B=B, C=C, D=D)
assert self._plans_equal(plan_np)
# Test with only the generic types. Only run if the types of A, B, C, D, and the accumulator are the same.
if (type_A == type_B and type_A == type_C and type_A == type_D and type_A == type_accum):
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, element=type_A)
assert self._plans_equal(plan_np)
def test_all(self):
"""
Runs all tests on the Conv2d interface
"""
self.generic_test()
self.numpy_test()
self.torch_test()
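# Usage sketch (standalone; mirrors the add_test() helper defined below):
#
#   eq = Conv2dEquivalence(
#     conv_kind="fprop",
#     element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16,
#     element_C=cutlass.DataType.f16, element_D=cutlass.DataType.f16,
#     element_accumulator=cutlass.DataType.f32,
#     alignment_A=8, alignment_B=8, alignment_C=8)
#   eq.test_all()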
@unittest.skipIf(device_cc() < 80, 'Device compute capability is insufficient for SM80 tests.')
class ConvEquivalenceTest(unittest.TestCase):
"""
Tests the equivalence of different constructions of the Conv2d interface
"""
pass
type2alignment = {
cutlass.DataType.f16: 8,
cutlass.DataType.f32: 4
}
def add_test(conv_kind, element_A, element_B, element_C, element_D, element_accumulator):
test_name = f"test_conv2d_{conv_kind}_{element_A}_{element_B}_{element_C}_{element_D}_{element_accumulator}"
def run(self):
conv2d_eq = Conv2dEquivalence(
conv_kind=conv_kind,
element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_D,
element_accumulator=element_accumulator,
alignment_A=type2alignment[element_A], alignment_B=type2alignment[element_B],
alignment_C=type2alignment[element_C]
)
conv2d_eq.test_all()
setattr(ConvEquivalenceTest, test_name, run)
for conv_kind in ["fprop", "wgrad", "dgrad"]:
for types in [
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16],
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32],
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f16],
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32],
[cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32]
]:
add_test(conv_kind, types[0], types[1], types[2], types[3], types[4])
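# Each (conv_kind, type combination) above registers one test method on ConvEquivalenceTest;
# the generated names follow the f-string in add_test() and depend on how the
# cutlass.DataType members stringify.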
@unittest.skipIf(device_cc() < 80, 'Device compute capability is insufficient for SM80 tests.')
class Conv2dErrorTests(unittest.TestCase):
"""
Tests various error scenarios that arise with the high-level Conv2d interface
"""
def test_alignment(self):
"""
Tests case in which the alignment specified is unsupported
"""
plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16)
with ExpectException(True, 'Alignment 3 is not supported for F16. The construction should fail.'):
op = plan.construct(alignment_A=3, alignment_B=3, alignment_C=3)
def test_invalid_tile_description(self):
"""
Tests scenarios in which an invalid tile description is provided for a given CC
"""
plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16)
td = plan.tile_descriptions()[0]
td.threadblock_shape=[17, 32, 5]
plan.tile_description = td
with ExpectException(True, 'The threadblock shape is invalid. The compilation should fail.'):
plan.compile()
# Clean up the error message
os.remove("./cutlass_python_compilation_device_error.txt")
if __name__ == '__main__':
unittest.main()
# [end of source file: test/python/cutlass/interface/conv2d_interface.py]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed sizes for Conv3d problems
*/
#pragma once
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
namespace test {
namespace conv {
namespace device {
using Conv3dProblemVector = std::vector<cutlass::conv::Conv3dProblemSize>;
////////////////////////////////////////////////////////////////////////////
/// Structure TestbedConv3dProblemSizes initializes and holds conv default and
/// important network sizes
////////////////////////////////////////////////////////////////////////////
struct TestbedConv3dProblemSizes {
//
// Data members
//
int minimum_channel_size;
Conv3dProblemVector conv3d_default_sizes;
Conv3dProblemVector conv3d_vnet_medical_sizes;
//
// Methods
//
/// Default ctor
TestbedConv3dProblemSizes(int minimum_channel_size_ = 64): minimum_channel_size (minimum_channel_size_) {
initialize_conv3d_default_sizes();
initialize_conv3d_vnet_medical_sizes(conv3d_vnet_medical_sizes, 1 /*batch-size*/);
filter_all();
}
/// Eliminates some illegal cases
void filter_all() {
Conv3dProblemVector *problems_vectors[] = {
&conv3d_default_sizes,
&conv3d_vnet_medical_sizes
};
for (Conv3dProblemVector *problems : problems_vectors) {
Conv3dProblemVector filtered;
for (cutlass::conv::Conv3dProblemSize const & problem : *problems) {
if (!(problem.C % minimum_channel_size)) {
filtered.push_back(problem);
}
}
*problems = filtered;
}
}
// Add a few standard convolution problem sizes
void initialize_conv3d_default_sizes() {
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 1, 3, 3, minimum_channel_size}, // input size (NDHWC)
{8, 1, 1, 1, minimum_channel_size}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 1, 1, 8, minimum_channel_size}, // input size (NDHWC)
{8, 1, 1, 3, minimum_channel_size}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 1, 1, 8, minimum_channel_size}, // input size (NDHWC)
{8, 1, 1, 3, minimum_channel_size}, // filter size (KTRSC)
CUTLASS_STL_NAMESPACE::make_tuple(
cutlass::Coord<3>({1, 1, 1}), // near padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({0, 0, 0}) // far padding (pad_d, pad_h, pad_w)
),
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 8, 8, 8, minimum_channel_size}, // input size (NDHWC)
{8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 8, 8, 8, minimum_channel_size}, // input size (NDHWC)
{8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC)
CUTLASS_STL_NAMESPACE::make_tuple(
cutlass::Coord<3>({1, 1, 1}), // near padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({0, 0, 0}) // far padding (pad_d, pad_h, pad_w)
),
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 16, 16, 16, minimum_channel_size}, // input size (NDHWC)
{8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 1, 15, 19, 160}, // input size (NDHWC)
{224, 1, 3, 6, 160}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 2, 1, 1, minimum_channel_size}, // input size (NDHWC)
{8, 2, 1, 1, minimum_channel_size}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 1, 7, 7, minimum_channel_size}, // input size (NDHWC)
{16, 1, 3, 3, minimum_channel_size}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize(
{1, 11, 15, 19, 64}, // input size (NDHWC)
{32, 4, 3, 6, 64}, // filter size (KTRSC)
cutlass::Coord<3>({2, 1, 3}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
}
// Add vnet layers to unit testing sizes
void initialize_conv3d_vnet_medical_sizes(Conv3dProblemVector &conv3d_problem_vector, int batch_size = 1) {
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 32, 32, 32, 16}, // input size (NDHWC)
{32, 2, 2, 2, 16}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 16, 16, 16, 32}, // input size (NDHWC)
{32, 3, 3, 3, 32}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 16, 16, 16, 32}, // input size (NDHWC)
{64, 2, 2, 2, 32}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 8, 8, 8, 64}, // input size (NDHWC)
{64, 3, 3, 3, 64}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 8, 8, 8, 64}, // input size (NDHWC)
{128, 2, 2, 2, 64}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 4, 4, 4, 128}, // input size (NDHWC)
{128, 3, 3, 3, 128}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 8, 8, 8, 128}, // input size (NDHWC)
{128, 3, 3, 3, 128}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 16, 16, 16, 64}, // input size (NDHWC)
{64, 3, 3, 3, 64}, // filter size (KTRSC)
cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 32, 32, 32, 16}, // input size (NDHWC)
{64, 2, 2, 2, 16}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize(
{batch_size, 16, 16, 16, 32}, // input size (NDHWC)
{128, 2, 2, 2, 32}, // filter size (KTRSC)
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w)
cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w)
cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w)
));
}
};
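// Usage sketch (hypothetical test body): instantiate the testbed and iterate its problem
// vectors, e.g.
//   TestbedConv3dProblemSizes sizes(/*minimum_channel_size=*/64);
//   for (auto const &problem : sizes.conv3d_default_sizes) { /* run and verify problem */ }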
} // namespace device
} // namespace conv
} // namespace test
// [end of source file: test/unit/conv/device/conv3d_problems.h]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for cutlass::bfloat16_t: host and device conversion, arithmetic, and rounding.
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/array.h"
#include "cutlass/core_io.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void convert_bf16_f32(cutlass::bfloat16_t *output, float const *input, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]);
}
}
__global__ void convert_and_pack_bf16(cutlass::bfloat16_t *output, float const *input, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid * 2 < N) {
cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert;
cutlass::Array<cutlass::bfloat16_t, 2> *dst_ptr =
reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2> *>(output + tid * 2);
cutlass::Array<float, 2> const *src_ptr =
reinterpret_cast<cutlass::Array<float, 2> const *>(input + tid * 2);
*dst_ptr = convert(*src_ptr);
}
}
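// The packed variant above converts two floats per thread through cutlass::NumericArrayConverter,
// exercising the vectorized bf16 conversion path that the element-wise kernel does not cover.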
TEST(bfloat16_t, device_conversion) {
using T = cutlass::bfloat16_t;
using S = float;
int const N = 256;
cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1});
cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1});
for (int i = 0; i < N; ++i) {
source.at({i, 0}) = float(i - 128);
destination.at({i, 0}) = T(0);
}
source.sync_device();
destination.sync_device();
convert_bf16_f32<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N);
ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error.";
destination.sync_host();
int errors = 0;
for (int i = 0; i < N; ++i) {
T got = destination.at({i, 0});
S expected = source.at({i, 0});
if (S(got) != expected) {
++errors;
if (errors < 10) {
std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n";
}
}
destination.at({i, 0}) = T(0);
}
destination.sync_device();
convert_and_pack_bf16<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N);
ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error.";
destination.sync_host();
for (int i = 0; i < N; ++i) {
T got = destination.at({i, 0});
S expected = source.at({i, 0});
if (S(got) != expected) {
++errors;
if (errors < 10) {
std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n";
}
}
}
EXPECT_EQ(errors, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host
//
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(bfloat16_t, host_conversion) {
for (int i = -128; i < 128; ++i) {
float f = static_cast<float>(i);
cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i);
cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f);
EXPECT_TRUE(static_cast<int>(x) == i);
EXPECT_TRUE(static_cast<float>(y) == f);
}
// Try out default-ctor (zero initialization of primitive proxy type)
EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16);
// Try out user-defined literals
EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16);
EXPECT_TRUE(7 == static_cast<int>(7_bf16));
}
TEST(bfloat16_t, host_arithmetic) {
for (int i = -100; i < 100; ++i) {
for (int j = -100; j < 100; ++j) {
cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i);
cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j);
EXPECT_TRUE(static_cast<int>(x + y) == (i + j));
}
}
}
TEST(bfloat16_t, host_round) {
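// Each entry pairs float32 bits with the expected bfloat16 bits. In the comments below,
// M is the least-significant retained mantissa bit, R is the round bit, and S is the OR of
// the remaining (sticky) bits; together they determine round-to-nearest-even behavior.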
struct {
uint32_t f32_bits;
uint16_t expected;
} tests[] = {
{0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz
{0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz
{0x40040001, 0x4004}, // M=0, R=0, S=1 => rtz
{0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf
{0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf
{0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz
{0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz
{0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf
{0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf
{0x7f800000, 0x7f80}, // +inf
{0xff800000, 0xff80}, // -inf
{0x7fffffff, 0x7fff}, // canonical NaN
{0x7ff00001, 0x7fff}, // NaN -> canonical NaN
{0xfff00010, 0x7fff}, // Nan -> canonical NaN
{0, 0}
};
bool running = true;
for (int i = 0; running; ++i) {
float f32 = reinterpret_cast<float const &>(tests[i].f32_bits);
cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32);
bool passed = (tests[i].expected == bf16.raw());
EXPECT_TRUE(passed)
<< "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits
<< ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw();
if (!tests[i].f32_bits) {
running = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Device
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// [end of source file: test/unit/core/bfloat16.cu]
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
#include "../cooperative_gemm_common.hpp"
using namespace cute;
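// Each test below instantiates a TiledMMA for SM70-class hardware and drives it through the
// shared test_cooperative_gemm* helpers from cooperative_gemm_common.hpp with different
// problem shapes, thread-block sizes, and value types.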
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA) {
using value_type = float;
constexpr uint32_t m = 64;
constexpr uint32_t n = 32;
constexpr uint32_t k = 16;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_16, _8, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication) {
using value_type = float;
constexpr uint32_t m = 88;
constexpr uint32_t n = 20;
constexpr uint32_t k = 12;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_2, _64, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication2) {
using value_type = float;
constexpr uint32_t m = 88;
constexpr uint32_t n = 36;
constexpr uint32_t k = 24;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_4, _32, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication3) {
using value_type = float;
constexpr uint32_t m = 67;
constexpr uint32_t n = 13;
constexpr uint32_t k = 11;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_1, _128, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm2_DoubleFMA) {
using value_type = double;
constexpr uint32_t m = 16;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_16, _8, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm3_Float_FMA_CustomPermutationMNK) {
using value_type = float;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 256;
using tiled_mma_t = TiledMMA<
MMA_Atom<
UniversalFMA<value_type, value_type, value_type, value_type>
>,
Layout<
Shape<_16, _16, _1>
>,
Tile<
Layout<
Shape<_16,_2>, Stride<_2,_1>
>, // 32x32x1 MMA with perm for load vectorization
Layout<
Shape<_16,_2>, Stride<_2,_1>
>,
Underscore
>
>;
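  // The Tile<> permutations expand the 16x16 thread grid into a 32x32x1 tiled MMA; per the
  // inline comment above, the Shape<_16,_2>:Stride<_2,_1> reordering is presumably chosen so
  // that each thread's loads land on contiguous elements and can be vectorized.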
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm4_Half_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using smem_a_atom_layout_t = typename tiled_mma_t::AtomLayoutB_TV;
using smem_b_atom_layout_t = typename tiled_mma_t::AtomLayoutA_TV;

using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {})));
test_cooperative_gemm_col_major_layout<smem_a_atom_layout_t,
smem_b_atom_layout_t,
smem_c_atom_layout_t,
m,
n,
k,
thread_block_size,
tiled_mma_t,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm5_Half_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
using smem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using smem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using smem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<128>, // A
AutoVectorizingCopyWithAssumedAlignment<128>, // B
AutoVectorizingCopyWithAssumedAlignment<128>, // C
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm5_Half_MMA_Predicated) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 31;
constexpr uint32_t n = 27;
constexpr uint32_t k = 17;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
using smem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using smem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using smem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<16>, // A
AutoVectorizingCopyWithAssumedAlignment<16>, // B
AutoVectorizingCopyWithAssumedAlignment<16>, // C
thread_block_size,
tiled_mma_t,
16,
value_type,
value_type,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm6_Half_MMA_SwizzledSmemLayouts) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 128;
constexpr uint32_t n = 128;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using smem_a_atom_layout_t = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape < _8,_64>,
Stride<_64, _1>>{}));
using smem_b_atom_layout_t = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape <_64, _8>,
Stride< _1,_64>>{}));
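  // Swizzle<3,3,3> XOR-scrambles offsets within each 8x64 / 64x8 atom, presumably to avoid
  // shared-memory bank conflicts when the tiled copy and MMA access the staged tiles.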
using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{}));
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<k> {}), GenRowMajor{}));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n> {}, Int<k> {}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {}), GenRowMajor{}));
using smem_a_layout_t = decltype(tile_to_shape(
smem_a_atom_layout_t{},
make_shape(shape<0>(gmem_a_layout_t{}), shape<1>(gmem_a_layout_t{})))
);
// Transposed
using smem_b_layout_t = decltype(tile_to_shape(
smem_b_atom_layout_t{},
make_shape(shape<0>(gmem_b_layout_t{}), shape<1>(gmem_b_layout_t{})))
);
using smem_c_layout_t = decltype(tile_to_shape(
smem_c_atom_layout_t{},
make_shape(shape<0>(gmem_c_layout_t{}), shape<1>(gmem_c_layout_t{})))
);
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<128>, // A
AutoVectorizingCopyWithAssumedAlignment<128>, // B
AutoVectorizingCopyWithAssumedAlignment<128>, // C
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformNegate_FMA) {
using TA = float;
using TB = float;
using TC = double;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<UniversalFMA<TC, TA, TB, TC>>,
Layout<Shape<_16, _8, _1>>
>;
auto aload = cute::negate {};
auto bload = cute::negate {};
auto cload = cute::negate {};
auto cstore = cute::negate {};
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>(
aload, bload, cload, cstore);
}
TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformNegate_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
auto aload = cute::negate {};
auto bload = cute::negate {};
auto cload = cute::negate {};
auto cstore = cute::negate {};
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(
aload, bload, cload, cstore);
}
template<class ConstantType>
struct increment_by_x {
ConstantType x;
template <class T>
CUTE_HOST_DEVICE constexpr
T operator()(const T& arg) const {
return arg + x;
}
};
template<class From, class To>
struct convert_to {
CUTE_HOST_DEVICE constexpr
To operator()(const From& arg) const {
return static_cast<To>(arg);
}
};
TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformCustomOp_FMA) {
using TA = float;
using TB = float;
using TC = double;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<UniversalFMA<TC, TA, TB, TC>>,
Layout<Shape<_16, _8, _1>>
>;
auto aload = increment_by_x<float>{1.111f};
auto bload = convert_to<float, double> {};
auto cload = cute::negate {};
auto cstore = cute::negate {};
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>(
aload, bload, cload, cstore);
}
| test/unit/cute/volta/cooperative_gemm.cu/0 | {
"file_path": "test/unit/cute/volta/cooperative_gemm.cu",
"repo_id": "test",
"token_count": 6602
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination, device_side_f16_f32_value) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombination = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(2);
Element beta = Element(1);
typename LinearCombination::Params params(alpha, beta);
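  // This variant constructs Params from scalar values; the *_ptr test below builds the same
  // epilogue from pointers to alpha and beta instead.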
LinearCombination linear_combination_op(params);
cutlass::Array<ElementOutput, kCount> source;
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element(i * 2);
source[i] = ElementOutput((i * 7 % 9) - 4);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source);
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = ElementOutput(
alpha * accum[i] +
beta * Element(ElementOutput(source[i]))
);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination, device_side_f16_f32_ptr) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombination = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(2);
Element beta = Element(1);
typename LinearCombination::Params params(&alpha, &beta);
LinearCombination linear_combination_op(params);
cutlass::Array<ElementOutput, kCount> source;
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element(i * 2);
source[i] = ElementOutput((i * 7 % 9) - 4);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source);
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = ElementOutput(
alpha * accum[i] +
beta * Element(ElementOutput(source[i]))
);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_gelu, device_side_f16_f16_ptr) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombinationGELU = cutlass::epilogue::thread::LinearCombinationGELU<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(1);
Element beta = Element(0);
typename LinearCombinationGELU::Params params(&alpha, &beta);
LinearCombinationGELU linear_combination_op(params);
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element((float)i * 0.3f);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, accum);
cutlass::epilogue::thread::GELU<ElementOutput> gelu_func;
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = gelu_func(accum[i]);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_gelu_taylor, device_side_f16_f16_ptr) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombinationGELU = cutlass::epilogue::thread::LinearCombinationGELU<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(1);
Element beta = Element(0);
typename LinearCombinationGELU::Params params(&alpha, &beta);
LinearCombinationGELU linear_combination_op(params);
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element((float)i * 0.3f);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, accum);
cutlass::epilogue::thread::GELU<ElementOutput> gelu_func;
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = gelu_func(accum[i]);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/thread/linear_combination.cu/0 | {
"file_path": "test/unit/epilogue/thread/linear_combination.cu",
"repo_id": "test",
"token_count": 2037
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_1,_1,_1>;
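  // All tests in this file use a 64x128x32 CTA tile. This first group fixes a 1x1x1 cluster and
  // the TMA warp-specialized schedule; the groups below repeat the four A/B layout combinations
  // with a 4x2x1 thread-block cluster and then with KernelScheduleAuto.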
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_1,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_1,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::gemm::EpilogueTransposed
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_1,_1,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::gemm::EpilogueTransposed
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
//////////// CollectiveBuilder with KernelScheduleAuto //////////////////////
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32n_tf32n_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32t_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::gemm::EpilogueTransposed
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
TEST(SM90_Device_Gemm_tf32n_tf32t_f32n_tensor_op_gmma_rs_ws_f32, 64x128x32_4x2x1_auto_schedule) {
using ElementA = cutlass::tfloat32_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::tfloat32_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementAccumulator = float;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_64,_128,_32>;
using ClusterShape_MNK = Shape<_4,_2,_1>;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 4,
ElementB, LayoutB, 4,
ElementAccumulator,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
float, LayoutC, 4,
float, LayoutC, 4,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
}
///////////////////////////////////////////////////////////////////////////////
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_tensor_op_f32_gmma_rs_cluster_warpspecialized.cu",
"repo_id": "test",
"token_count": 8066
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct GemmWithBroadcastReferenceOp {
using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename OutputOp::ElementCompute;
using ElementZ = typename OutputOp::ElementZ;
using ElementT = typename OutputOp::ElementT;
typename OutputOp::BinaryOp binary_op;
typename OutputOp::ElementwiseOp elementwise_op;
GemmWithBroadcastReferenceOp() { }
void operator()(ElementZ &Z, ElementT &T, ElementCompute gemm, ElementCompute bias) {
ElementCompute t_full = binary_op(gemm, bias);
if (OutputOp::kStoreT) {
T = ElementT(t_full);
}
if (OutputOp::kStoreZ) {
ElementCompute z_full = elementwise_op(t_full);
Z = ElementZ(z_full);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Fused testbed
//
// Y = GEMM(AB, C)
//
// T[i, j] = BinaryOp(Y[i, j], Broadcast[i])
//
// Z[i, j] = Elementwise(T[i, j])
//
template <
typename Gemm,
typename ReferenceOp = GemmWithBroadcastReferenceOp<Gemm>
>
struct TestbedGemmWithBroadcast {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using OutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename OutputOp::ElementCompute;
using ElementVector = typename OutputOp::ElementVector;
using ElementZ = typename OutputOp::ElementZ;
using ElementT = typename OutputOp::ElementT;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A; // Input A
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B; // Input B
cutlass::HostTensor<ElementC, typename Gemm::LayoutC> tensor_C; // Input C
cutlass::HostTensor<ElementVector, typename Gemm::LayoutC> tensor_Broadcast; // Input Broadcast
cutlass::HostTensor<ElementZ, typename Gemm::LayoutC> tensor_Z;
cutlass::HostTensor<ElementT, typename Gemm::LayoutC> tensor_T;
cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tensor_C_ref;
cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tensor_Y_ref;
cutlass::HostTensor<ElementZ, typename Gemm::LayoutC> tensor_Z_ref;
cutlass::HostTensor<ElementT, typename Gemm::LayoutC> tensor_T_ref;
//
// Methods
//
TestbedGemmWithBroadcast(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
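      // Narrower ranges for low-precision element types keep the random operands (and hence the
      // accumulated results) small, presumably so the reference comparison is not dominated by
      // rounding or overflow effects.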
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_Z.resize(problem_size.mn());
tensor_T.resize(problem_size.mn());
tensor_Broadcast.resize({
problem_size.m(),
1
});
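    // One broadcast element per output row: extent {M, 1}, read as Broadcast[m] by the
    // reference computation in verify().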
tensor_C_ref.resize(problem_size.mn());
tensor_Y_ref.resize(problem_size.mn());
tensor_Z_ref.resize(problem_size.mn());
tensor_T_ref.resize(problem_size.mn());
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
EXPECT_TRUE(initialize_tensor(tensor_Broadcast.host_view(), init_C, seed + 2020));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1);
for (int m = 0; m < tensor_C_ref.extent().row(); ++m) {
for (int n = 0; n < tensor_C_ref.extent().column(); ++n) {
tensor_C_ref.at({m, n}) = ElementAccumulator(tensor_C.at({m, n}));
}
}
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_Broadcast.sync_device();
tensor_Z.sync_device();
tensor_T.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementAccumulator alpha,
ElementAccumulator beta) {
tensor_Z.sync_host();
tensor_T.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (OutputOp::kStoreZ) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Z.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Z_ref.host_view()), 0);
}
if (OutputOp::kStoreT) {
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_T.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_T_ref.host_view()), 0);
}
bool passed = true;
float norm_diff = 0;
if (OutputOp::kStoreZ) {
norm_diff = cutlass::reference::host::TensorNormDiff(tensor_Z_ref.host_view(), tensor_Z.host_view(), float());
passed = (norm_diff <= 0.1f);
EXPECT_LT(norm_diff, 0.1f) << " tensor_Z is incorrect";
}
if (OutputOp::kStoreT) {
norm_diff = cutlass::reference::host::TensorNormDiff(tensor_T_ref.host_view(), tensor_T.host_view(), float());
passed = (passed && (norm_diff <= 0.1f));
EXPECT_LT(norm_diff, 0.1f) << " tensor_T is incorrect";
}
if (!passed) {
/*
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
*/
std::ofstream file("errors_testbed_gemm_with_broadcast.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nZ =\n" << tensor_Z.host_view()
<< "\nT =\n" << tensor_T.host_view()
<< "\n\n"
<< "\nY_ref =\n" << tensor_Y_ref.host_view()
<< "\nZ_ref =\n" << tensor_Z_ref.host_view()
<< "\nT_ref =\n" << tensor_T_ref.host_view();
}
return passed;
}
/// Verifies the result is a GEMM
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementAccumulator alpha,
ElementAccumulator beta) {
//
// Verify
//
cutlass::reference::host::GemmComplex<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
ElementAccumulator, typename Gemm::LayoutC,
ElementAccumulator, ElementAccumulator
>(
problem_size,
alpha,
tensor_A.host_ref(),
Gemm::kTransformA,
tensor_B.host_ref(),
Gemm::kTransformB,
beta,
tensor_C_ref.host_ref(),
tensor_Y_ref.host_ref(),
ElementAccumulator(0)
);
using ElementC = typename Gemm::ElementC;
ReferenceOp reference_op;
// compute tensor Z and tensor T
for (int m = 0; m < problem_size.m(); ++m) {
for (int n = 0; n < problem_size.n(); ++n) {
ElementZ z;
ElementT t;
reference_op(z, t, tensor_Y_ref.at({m, n}), tensor_Broadcast.at({m, 0}));
if (OutputOp::kStoreZ) {
tensor_Z_ref.at({m, n}) = z;
}
if (OutputOp::kStoreT) {
tensor_T_ref.at({m, n}) = t;
}
}
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementAccumulator alpha = ElementAccumulator(1),
ElementAccumulator beta = ElementAccumulator(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_Z.device_data(),
tensor_Broadcast.device_data(),
tensor_T.device_data(),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_Z.layout().stride(0),
0, // This must be zero
tensor_T.layout().stride(0),
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = true;
passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed with batch_count/split_k_slices = " << batch_count << std::endl;
}
//
// Profile
//
#if 0 // profiling disabled for now.
int const kWorkspaces = 100;
cutlass::DeviceAllocation<typename Gemm::ElementA> profiling_tensor_A(tensor_A.capacity() * kWorkspaces);
cutlass::DeviceAllocation<typename Gemm::ElementB> profiling_tensor_B(tensor_B.capacity() * kWorkspaces);
cutlass::DeviceAllocation<ElementC> profiling_tensor_C(tensor_C.capacity() * kWorkspaces);
cutlass::DeviceAllocation<ElementC> profiling_tensor_Broadcast(tensor_Broadcast.capacity() * kWorkspaces);
cutlass::DeviceAllocation<ElementZ> profiling_tensor_Z(tensor_Z.capacity() * kWorkspaces);
cutlass::DeviceAllocation<ElementT> profiling_tensor_T(tensor_T.capacity() * kWorkspaces);
cudaEvent_t events[2];
for (auto & event : events) {
cudaError_t result = cudaEventCreate(&event);
if (result != cudaSuccess) {
EXPECT_EQ(result, cudaSuccess) << " cudaEventCreate() failed with error " << cudaGetErrorString(result);
return false;
break;
}
}
int const kWarmupIterations = 5;
int const kProfilingIterations = 100;
for (int i = 0; i < kWarmupIterations; ++i) {
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
}
cudaError_t result = cudaEventRecord(events[0]);
EXPECT_EQ(result, cudaSuccess);
for (int i = 0; i < kProfilingIterations; ++i) {
typename Gemm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
profiling_tensor_A.get() + tensor_A.capacity() * (i % kWorkspaces),
profiling_tensor_B.get() + tensor_B.capacity() * (i % kWorkspaces),
profiling_tensor_C.get() + tensor_C.capacity() * (i % kWorkspaces),
profiling_tensor_Z.get() + tensor_Z.capacity() * (i % kWorkspaces),
profiling_tensor_Broadcast.get() + tensor_Broadcast.capacity() * (i % kWorkspaces),
profiling_tensor_T.get() + tensor_T.capacity() * (i % kWorkspaces),
problem_size.m() * problem_size.k(),
problem_size.n() * problem_size.k(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_Z.layout().stride(0),
0, // This must be zero
tensor_T.layout().stride(0),
};
gemm_op.initialize(arguments, workspace.get());
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
}
result = cudaEventRecord(events[1]);
EXPECT_EQ(result, cudaSuccess);
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess);
float elapsed_time = 0;
result = cudaEventElapsedTime(&elapsed_time, events[0], events[1]);
EXPECT_EQ(result, cudaSuccess);
double average_time = double(elapsed_time) / double(kProfilingIterations);
std::cout << problem_size << ": " << average_time << " ms" << std::endl;
for (auto & event : events) {
cudaEventDestroy(event);
}
#endif
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
typename ReferenceOp = GemmWithBroadcastReferenceOp<Gemm>
>
bool TestGemmWithBroadcast(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedGemmWithBroadcast<Gemm, ReferenceOp> testbed;
using ElementAccumulator = typename Gemm::ElementAccumulator;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementAccumulator>(alpha),
cutlass::from_real<ElementAccumulator>(beta)
);
return passed;
}
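// Illustrative usage from a test translation unit (sketch only -- the Gemm template arguments
// are elided and would come from the kernel under test):
//
//   using Gemm = cutlass::gemm::device::GemmUniversalWithBroadcast< /* ... */ >;
//   EXPECT_TRUE((test::gemm::device::TestGemmWithBroadcast<Gemm>(
//       {520, 264, 136}, cutlass::gemm::GemmUniversalMode::kGemm, /*batch_count=*/1)));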
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Gemm,
typename ReferenceOp = GemmWithBroadcastReferenceOp<Gemm>
>
bool TestAllGemmWithBroadcast() {
int M_problems[] = {8, 136, 264, 520};
int N_problems[] = {8, 136, 264, 520};
int K_problems[] = {8, 136, 264, 520};
double alpha_problems[] = {1.25, 2.25};
double beta_problems[] = {0, 1, 2.0};
bool passed = true;
for (int M : M_problems) {
for (int N : N_problems) {
for (int K : K_problems) {
for (double alpha : alpha_problems) {
for (double beta : beta_problems) {
TestbedGemmWithBroadcast<Gemm, ReferenceOp> testbed;
using ElementAccumulator = typename Gemm::ElementAccumulator;
passed = testbed.run(
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K},
1,
cutlass::from_real<ElementAccumulator>(alpha),
cutlass::from_real<ElementAccumulator>(beta)
);
EXPECT_TRUE(passed)
<< "M: " << M << ", N: " << N << ", K: " << K << ", alpha: " << alpha << ", beta: " << beta;
if (!passed) {
return passed;
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_gemm_with_broadcast.h/0 | {
"file_path": "test/unit/gemm/device/testbed_gemm_with_broadcast.h",
"repo_id": "test",
"token_count": 8380
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for threadblock level GEMV
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/gemm/threadblock/gemv.h"
#include "cutlass/gemm/threadblock/default_gemv_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemv, typename LongIndex, typename RefA, typename RefB, typename RefC>
__global__ void batched_gemv_threadblock_test_kernel(
cutlass::gemm::GemmCoord problem_size,
LongIndex stride_a,
LongIndex stride_b,
LongIndex stride_c,
RefA ref_A,
RefB ref_B,
RefC ref_C
) {
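  // A single CTA processes every batch: threadIdx.y selects the batch and threadIdx.x indexes
  // the participating thread within that batch's GEMV (see the 2-D block shape chosen by the
  // host-side launcher below).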
typename Gemv::IteratorA::TensorCoord threadblock_offset_A(0, 0);
typename Gemv::IteratorB::TensorCoord threadblock_offset_B(0, 0);
typename Gemv::IteratorB::TensorCoord threadblock_offset_C(0, 0);
// Move to the right batches for these threads
ref_A.add_pointer_offset(threadIdx.y * stride_a);
ref_B.add_pointer_offset(threadIdx.y * stride_b);
ref_C.add_pointer_offset(threadIdx.y * stride_c);
// Construct iterators to A and B operands
typename Gemv::IteratorA::Params params_A(ref_A.layout());
typename Gemv::IteratorA iterator_A(params_A, ref_A.data(), { problem_size.m(), problem_size.k() }, 0, threadblock_offset_A);
typename Gemv::IteratorB::Params params_B(ref_B.layout());
typename Gemv::IteratorB iterator_B(params_B, ref_B.data(), { problem_size.k(), problem_size.n() }, threadIdx.x, threadblock_offset_B);
Gemv gemv;
typename Gemv::FragmentC accum;
accum.clear();
// Compute threadblock-scoped matrix multiply-add
gemv(problem_size, accum, iterator_A, iterator_B, accum);
// IteratorC is PitchLinear<> and assumes n() is the contiguous dimension
typename Gemv::IteratorC::Params params_C(ref_C.layout());
typename Gemv::IteratorC iterator_C(params_C, ref_C.data(), { problem_size.m(), problem_size.n() }, threadIdx.x, threadblock_offset_C);
iterator_C.store(accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename Shape_,
typename ElementAB_,
typename ElementC_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_,
int THREAD_N,
int THREAD_K,
int MAX_THREADS_PER_BLOCK=512,
bool DEBUG=false>
void batched_gemv_threadblock_test(cutlass::gemm::GemmCoord problem_size, int num_batch)
{
using Shape = Shape_;
using ElementA = ElementAB_;
using LayoutA = LayoutA_;
using ElementB = ElementAB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ThreadShape = cutlass::gemm::GemmShape<1, THREAD_N, THREAD_K>;
using Core = typename cutlass::gemm::threadblock::DefaultGemvCore<
Shape,
ThreadShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC
>;
if (DEBUG)
{
num_batch = 1;
}
using Mma = cutlass::gemm::threadblock::Gemv<Core>;
// Create host tensors that will be the backing store for the batches
// Note that no device memory is initially allocated
cutlass::HostTensor<ElementA, LayoutA> matrix_A({problem_size.m(), problem_size.k()}, false);
cutlass::HostTensor<ElementB, LayoutB> matrix_B({problem_size.k(), problem_size.n()}, false);
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed({problem_size.m(), problem_size.n()}, false);
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference({problem_size.m(), problem_size.n()}, false);
// Reserve memory for the batch of tensors
matrix_A.reserve(problem_size.m()*problem_size.k()*num_batch);
matrix_B.reserve(problem_size.n()*problem_size.k()*num_batch);
matrix_C_computed.reserve(problem_size.m()*problem_size.n()*num_batch);
matrix_C_reference.reserve(problem_size.m()*problem_size.n()*num_batch, false);
// Fill each tensor batch
const int seed = 6834;
for (int b = 0; b < num_batch; b++)
{
if(DEBUG)
{
cutlass::reference::host::BlockFillSequential(
matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity());
cutlass::reference::host::BlockFillSequential(
matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity());
}
else
{
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(b*matrix_A.capacity()),
seed + 1660,
8,
-8,
0
);
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(b*matrix_B.capacity()),
seed + 1880,
8,
-8,
0
);
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity()));
cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity()));
}
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
dim3 grid(1, 1); // only 1 CTA is used
dim3 block(Shape::kN / THREAD_N, num_batch, 1);
#if 0
printf("block dim = %d x %d\n", block.x, block.y);
#endif
// Some sanity checks
EXPECT_TRUE( problem_size.n() % THREAD_N == 0 );
EXPECT_TRUE( block.x*block.y <= MAX_THREADS_PER_BLOCK );
test::gemm::threadblock::batched_gemv_threadblock_test_kernel<Mma><<< grid, block >>>(
problem_size,
matrix_A.capacity(),
matrix_B.capacity(),
matrix_C_computed.capacity(),
matrix_A.device_ref(),
matrix_B.device_ref(),
matrix_C_computed.device_ref()
);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result);
matrix_C_computed.sync_host();
// Compute the batched gemms
for (int b = 0; b < num_batch; b++)
{
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC> reference_gemm;
reference_gemm(
problem_size.mnk(),
ElementC(1),
matrix_A.host_ref(b*matrix_A.capacity()),
matrix_B.host_ref(b*matrix_B.capacity()),
ElementC(0),
      matrix_C_reference.host_ref(b*matrix_C_reference.capacity())
);
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(b*matrix_C_computed.capacity()),
matrix_C_reference.host_view(b*matrix_C_reference.capacity()));
EXPECT_TRUE(passed)
//<< "A:\n" << matrix_A.host_view() << "\n"
//<< "B:\n" << matrix_B.host_view() << "\n"
<< "Batch: " << b << "\n"
<< "Reference:\n" << matrix_C_reference.host_view(b*matrix_C_reference.capacity()) << "\n"
<< "Computed:\n" << matrix_C_computed.host_view(b*matrix_C_computed.capacity()) << "\n";
}
}
} // namespace threadblock
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
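// Test naming convention used below (inferred from each test's parameters):
// <num_batch>x<M>x<N>x<K>_<A/B/C layouts: c=ColumnMajor, r=RowMajor>_<A/B type>_
// <C type>_<THREAD_N>N_<THREAD_K>K. For example, 4x1x64x64_crc_fp32_fp32_2N_2K
// runs 4 batches of a 1x64x64 problem with A column-major, B row-major,
// C column-major, fp32 operands and output, THREAD_N=2, and THREAD_K=2.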
// A: ColumnMajor
// B: RowMajor
// C: ColumnMajor
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp32_fp32_2N_2K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 2;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 5x1x128x128_crc_fp32_fp32_4N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 128, 128);
const int num_batch = 5;
const int THREAD_N = 4;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>;
batched_gemv_threadblock_test<Shape, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_fp32_fp32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp16_fp32_2N_2K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 2;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_fp16_fp32_2N_8K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 8;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_fp16_fp32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_crc_i8_i32_2N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>;
batched_gemv_threadblock_test<Shape,
int8_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_crc_i8_i32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
int8_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
// A: RowMajor
// B: ColumnMajor
// C: RowMajor
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp32_fp32_2N_2K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 2;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 5x1x128x128_rcr_fp32_fp32_4N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 128, 128);
const int num_batch = 5;
const int THREAD_N = 4;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>;
batched_gemv_threadblock_test<Shape, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_fp32_fp32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp16_fp32_2N_2K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 2;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_fp16_fp32_2N_8K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 8;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_fp16_fp32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcr_i8_i32_2N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>;
batched_gemv_threadblock_test<Shape,
int8_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcr_i8_i32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
int8_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
// A: RowMajor
// B: ColumnMajor
// C: ColumnMajor
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp32_fp32_2N_2K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 2;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 5x1x128x128_rcc_fp32_fp32_4N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 128, 128);
const int num_batch = 5;
const int THREAD_N = 4;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>;
batched_gemv_threadblock_test<Shape, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_fp32_fp32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp16_fp32_2N_2K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 2;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_fp16_fp32_2N_8K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 8;
using Shape = cutlass::gemm::GemmShape<1, 64, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_fp16_fp32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
cutlass::half_t, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 4x1x64x64_rcc_i8_i32_2N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 64, 64);
const int num_batch = 4;
const int THREAD_N = 2;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 128, THREAD_K>;
batched_gemv_threadblock_test<Shape,
int8_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
TEST(SM50_batched_gemv_threadblock, 16x1x17x64_rcc_i8_i32_1N_4K) {
using namespace test::gemm::threadblock;
cutlass::gemm::GemmCoord problem_size(1, 17, 64);
const int num_batch = 16;
const int THREAD_N = 1;
const int THREAD_K = 4;
using Shape = cutlass::gemm::GemmShape<1, 32, THREAD_K>;
batched_gemv_threadblock_test<Shape,
int8_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
THREAD_N, THREAD_K>(problem_size, num_batch);
}
| test/unit/gemm/threadblock/batched_gemv.cu/0 | {
"file_path": "test/unit/gemm/threadblock/batched_gemv.cu",
"repo_id": "test",
"token_count": 11925
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for threadblock-level GEMM
*/
#include "cutlass/arch/wmma.h"
#ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED
#include "mma_pipelined_testbed.h"
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"
/// All tests use double-buffered (kStages=2) mma pipeline for the gemm mainloop
/// Test name format: SM[arch]_gemm_threadblock_wmma_tensor_op_[alayout]_[blayout]_[clayout]_[atype].[threadblock_shape]_[warp_shape]_[instruction_shape]
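/// For example, the first test below, SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8
/// with suffix 64x64x32_64x64x32_16x16x16, uses row-major A, column-major B, row-major C,
/// int8 operands, a 64x64x32 threadblock tile, a 64x64x32 warp tile, and the 16x16x16
/// WMMA instruction shape.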
/////////////////////////////////////////////////////////////////////////
/// Integer (s8 and u8) WMMA threadblock level tests /////
/////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED)
TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8, 64x64x32_64x64x32_16x16x16) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::RowMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;
float alpha = 1.f;
float beta = 0.0f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s8, 64x64x64_64x64x64_16x16x16) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::RowMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;
float alpha = 1.f;
float beta = 0.0f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
TEST(SM75_gemm_threadblock_wmma_tensor_op_col_row_row_s8, 64x64x32_64x64x32_16x16x16) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::RowMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;
float alpha = 1.f;
float beta = 0.0f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
TEST(SM75_gemm_threadblock_wmma_tensor_op_col_row_row_s8, 64x64x64_64x64x64_16x16x16) {
using ElementA = int8_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = int8_t;
using LayoutB = cutlass::layout::RowMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::RowMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;
float alpha = 1.f;
float beta = 0.0f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
#endif //CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED
////////////////////////////////////////////////////////////////////////
/// SUBBYTE (s4 and b1) WMMA threadblock level tests ////
///////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED)
TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_s4, 64x64x128_64x64x128_8x8x32) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::RowMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 128);
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
float alpha = 1.f;
float beta = 0.f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadBlockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_col_s4, 64x64x64_64x64x64_8x8x32) {
using ElementA = cutlass::int4b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::int4b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::ColumnMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
float alpha = 1.f;
float beta = 0.f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadBlockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_row_b1, 64x64x512_64x64x512_8x8x128) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::RowMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 2048);
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
float alpha = 1.f;
float beta = 0.f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadBlockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages,
cutlass::arch::OpXorPopc>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
TEST(SM75_gemm_threadblock_wmma_tensor_op_row_col_col_b1, 64x64x512_64x64x512_8x8x128) {
using ElementA = cutlass::uint1b_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementB = cutlass::uint1b_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = int32_t;
using LayoutC = cutlass::layout::ColumnMajor;
static const int kStages = 2;
cutlass::gemm::GemmCoord problem_size(64, 64, 2048);
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
float alpha = 1.f;
float beta = 0.f;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadBlockShape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
cutlass::arch::OpClassWmmaTensorOp, kStages,
cutlass::arch::OpXorPopc>;
dim3 grid(1, 1);
dim3 block(32, 1, 1);
test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
problem_size.k(), alpha, beta)
.run(grid, block);
}
#endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED
#endif //CUTLASS_ARCH_WMMA_SM75_ENABLED
| test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu/0 | {
"file_path": "test/unit/gemm/threadblock/mma_pipelined_wmma_sm75.cu",
"repo_id": "test",
"token_count": 4841
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include <limits>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the W dimension, transforming an NHWC tensor into NHWC with W=1.
template <
typename TensorReduction,
typename ElementCompute = typename TensorReduction::ElementCompute
>
bool TestAllReduction_NHWC_reduce_w(ElementCompute reduction_identity = ElementCompute()) {
using Layout = typename TensorReduction::Layout;
using ElementOutput = typename TensorReduction::ElementOutput;
using ElementSource = typename TensorReduction::ElementSource;
int const kV = TensorReduction::kVectorLength;
int const N_indices[] = {1, 2, 5, 10};
int const H_indices[] = {1, 3, 9 };
int const W_indices[] = {1, 5, 19, 40, 224};
int const C_indices[] = {
kV,
2 * kV,
5 * kV,
9 * kV,
17 * kV,
39 * kV,
257 * kV,
kV * 760
};
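  // All channel extents are multiples of the vector length kV so that the
  // vectorized accesses used by TensorReduction remain aligned.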
using Element = int;
for (int N : N_indices) {
for (int H : H_indices) {
for (int W : W_indices) {
for (int C : C_indices) {
cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, 1, C});
cutlass::reference::host::TensorFillRandomUniform(
src_tensor.host_view(), 17, 10, -10, 0);
cutlass::reference::host::BlockFillSequential(
dst_tensor.host_data(), dst_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
// Execute a tensor reduction over rank 2 (the 'W' dimension is reduced; NHWC => NHC)
TensorReduction reduction(src_tensor.extent(), 2);
cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
cutlass::Status status = reduction.reduce(
dst_tensor.device_ref(),
src_tensor.device_ref(),
device_workspace.get(),
reduction_identity
);
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
// Reference check
dst_tensor.sync_host();
typename TensorReduction::ReductionOp reduction_op;
for (int n = 0; n < src_tensor.extent().n(); ++n) {
for (int h = 0; h < src_tensor.extent().h(); ++h) {
for (int c = 0; c < src_tensor.extent().c(); ++c) {
ElementCompute w_accum = reduction_identity;
for (int w = 0; w < src_tensor.extent().w(); ++w) {
w_accum = reduction_op(w_accum, ElementCompute(src_tensor.at({n, h, w, c})));
}
ElementCompute got = ElementCompute(dst_tensor.at({n, h, 0, c}));
bool equal = (w_accum == got);
EXPECT_TRUE(equal);
if (!equal) {
std::cerr
<< "Error at location (" << n << ", " << h << ", 0, " << c << ")" << std::endl;
std::cerr
<< " expected: " << w_accum << std::endl
<< " got: " << got << std::endl;
std::cerr
<< "Problem: " << src_tensor.extent() << " -> "
<< dst_tensor.extent() << std::endl;
std::cerr
<< " Grid: " << reduction.reduction_strided.grid_shape
<< "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
<< " Final: " << reduction.reduction_strided.grid_final
<< "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
return false;
}
}
}
}
}
}
}
}
return true;
}
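// The reference check above verifies, for every (n, h, c):
//   dst(n, h, 0, c) == reduce over w in [0, W) of src(n, h, w, c),
// where the reduction starts from `reduction_identity` and applies the
// TensorReduction's ReductionOp pairwise.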
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x8_f16x8) {
int const kV = 8;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x2_f16x2) {
int const kV = 2;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x1_f16x1) {
int const kV = 1;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_reduce_w_s32x4) {
int const kV = 4;
using Element = int;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::plus<Element>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
Element,
Element,
Layout,
Functor,
kV,
Element
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_reduce_w_cf32) {
int const kV = 1;
using ElementOutput = cutlass::complex<float>;
using ElementSource = cutlass::complex<float>;
using ElementCompute = cutlass::complex<float>;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_maximum_w_cf32) {
int const kV = 1;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::maximum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>( -std::numeric_limits<float>::max() ));
}
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_minimum_w_cf32) {
int const kV = 1;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::minimum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(std::numeric_limits<float>::max()));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_XOR_w_u32) {
int const kV = 1;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::bit_xor<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_AND_w_s32) {
int const kV = 1;
using ElementOutput = unsigned;
using ElementSource = unsigned;
using ElementCompute = unsigned;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::bit_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(0xffffffff));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_OR_w_u32) {
int const kV = 1;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::bit_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_ANY_w_s32) {
int const kV = 1;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0)));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_ALL_w_s32) {
int const kV = 1;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1)));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_ANY_w_f32) {
int const kV = 1;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0)));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHC
TEST(Reduction_TensorReduce, nhwc_ALL_w_f32) {
int const kV = 1;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
using Layout = cutlass::layout::TensorNHWC;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1)));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/reduction/device/tensor_reduce_strided.cu/0 | {
"file_path": "test/unit/reduction/device/tensor_reduce_strided.cu",
"repo_id": "test",
"token_count": 5747
} | 64 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "../common/cutlass_unit_test.h"
#include "cutlass/util/device_rmsnorm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/constants.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
using ElementType = cutlass::half_t;
using Layout = cutlass::layout::RowMajor;
void rmsnorm_host(cutlass::MatrixCoord tensor_size,
cutlass::TensorRef<ElementType, Layout> output,
cutlass::TensorRef<ElementType, Layout> input,
cutlass::TensorRef<ElementType, Layout> weight,
float epsilon) {
const int M = tensor_size.row();
const int N = tensor_size.column();
for (int m = 0; m < M; ++m) {
float square_sum{0};
for (int n = 0; n < N; ++n) {
float inp = static_cast<float>(input.at({m, n}));
square_sum += inp * inp;
}
float sq_mean = square_sum / (float)N;
float sqrt_var = cutlass::fast_sqrt(sq_mean + epsilon);
for (int n = 0; n < N; ++n) {
float inp = static_cast<float>(input.at({m, n}));
float g = static_cast<float>(weight.at({0, n}));
float res_fp32 = inp / sqrt_var * g;
output.at({m, n}) = ElementType(res_fp32);
}
}
}
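// The host reference above computes, per row m and column n:
//   output[m][n] = input[m][n] / sqrt(mean_k(input[m][k]^2) + epsilon) * weight[n]
// and is compared elementwise against the device-side cutlass::rmsnorm in run_test.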
void run_test(int M, int N) {
cutlass::HostTensor<ElementType, Layout> input, output_ref, output, weight;
input.reset({M, N});
output.reset({M, N});
output_ref.reset({M, N});
weight.reset({1, N});
const unsigned seed = 2022;
cutlass::reference::host::TensorFillRandomUniform(input.host_view(),
seed,
ElementType(5),
ElementType(-5),
0);
cutlass::reference::host::TensorFillRandomUniform(weight.host_view(),
seed,
ElementType(5),
ElementType(-5),
0);
input.sync_device();
weight.sync_device();
rmsnorm_host({M, N}, output_ref.host_ref(), input.host_ref(), weight.host_ref(), (float)1e-5);
cutlass::rmsnorm({M, N}, output.device_ref(),
                   input.device_ref(), weight.device_ref(), NULL, (float)1e-5);
output.sync_host();
float max_abs_diff = -1;
float mean_abs_diff = 0;
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
auto diff = abs(static_cast<float>(output_ref.at({m, n}) - output.at({m, n})));
mean_abs_diff += diff;
max_abs_diff = max(max_abs_diff, diff);
}
}
mean_abs_diff /= float(M * N);
EXPECT_TRUE(max_abs_diff < 0.001f && mean_abs_diff < 0.001f)
<< "Max absolute difference : " << max_abs_diff << "\n"
<< "Mean absolute difference: " << mean_abs_diff;
}
TEST(RMSNorm, 16x1024) {
run_test(16, 1024);
}
TEST(RMSNorm, 1x127) {
run_test(1, 127);
}
| test/unit/util/rms_norm.cu/0 | {
"file_path": "test/unit/util/rms_norm.cu",
"repo_id": "test",
"token_count": 1619
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines operations for all GEMM operation kinds in CUTLASS Library.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/gemm/device/gemm_complex.h"
#include "cutlass/gemm/device/gemm_batched.h"
#include "cutlass/gemm/device/gemm_array.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_planar_complex_universal.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
// assuming all tensors use same type for StrideIndex
using StrideIndex = typename Operator::LayoutA::Index;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
protected:
///
GemmDescription description_;
public:
/// Constructor
GemmOperationBase(char const *name = "unknown_gemm") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kGemm;
description_.gemm_kind = GemmKind::kGemm;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::GemmKernel::WarpCount::kM,
Operator::GemmKernel::WarpCount::kN,
Operator::GemmKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::MathOperator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA);
description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB);
description_.C = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC);
description_.D = make_TensorDescription<ElementD, LayoutD>(Operator::kAlignmentC);
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.split_k_mode = SplitKMode::kNone;
description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId;
description_.transform_B = ComplexTransformMap<Operator::kTransformB>::kId;
}
/// Returns the description of the GEMM operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kGemm;
}
protected:
  /// Constructs the arguments structure given the configuration
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A = {nullptr, configuration->lda};
operator_args.ref_B = {nullptr, configuration->ldb};
operator_args.ref_C = {nullptr, configuration->ldc};
operator_args.ref_D = {nullptr, configuration->ldd};
operator_args.split_k_slices = configuration->split_k_slices;
return Status::kSuccess;
}
  /// Updates the arguments structure given the user-provided arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmArguments const *arguments) {
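    // ScalarPointerMode::kHost means alpha/beta point to host-resident scalars
    // that are dereferenced here and copied into the epilogue params by value;
    // kDevice means they are device pointers that the epilogue reads on the GPU.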
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA const *>(arguments->A));
operator_args.ref_B.reset(static_cast<ElementB const *>(arguments->B));
operator_args.ref_C.reset(static_cast<ElementC const *>(arguments->C));
operator_args.ref_D.reset(static_cast<ElementD *>(arguments->D));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmConfiguration const *configuration =
static_cast<GemmConfiguration const *>(configuration_ptr);
GemmArguments const *arguments =
static_cast<GemmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
  /// Gets the size of the host-side workspace in bytes
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
  /// Gets the size of the device-side workspace in bytes
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
return op->run(stream);
}
void print_operator_args(OperatorArguments &operator_args) const {
#if 0
std::cout << "GemmOperation::OperatorArguments" << std::endl;
std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl;
std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl;
std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl;
std::cout << " beta: " << operator_args.epilogue.beta << std::endl;
std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl;
std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl;
std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl;
std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl;
std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl;
std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl;
std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl;
#endif
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmSparseOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementE = typename Operator::ElementE;
using LayoutE = typename Operator::LayoutE;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmSparseOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.kind = OperationKind::kSparseGemm;
this->description_.gemm_kind = GemmKind::kSparse;
this->description_.E = make_TensorDescription<ElementE, LayoutE>(Operator::kAlignmentE);
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
SparseGemmConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.ref_A = {nullptr, configuration->lda};
operator_args.ref_B = {nullptr, configuration->ldb};
operator_args.ref_C = {nullptr, configuration->ldc};
operator_args.ref_D = {nullptr, configuration->ldd};
operator_args.ref_E = {nullptr, configuration->lde};
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
SparseGemmArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.ref_A.reset(static_cast<ElementA const *>(arguments->A));
operator_args.ref_B.reset(static_cast<ElementB const *>(arguments->B));
operator_args.ref_C.reset(static_cast<ElementC const *>(arguments->C));
operator_args.ref_D.reset(static_cast<ElementD *>(arguments->D));
operator_args.ref_E.reset(static_cast<ElementE const *>(arguments->E));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
SparseGemmConfiguration const *configuration =
static_cast<SparseGemmConfiguration const *>(configuration_ptr);
SparseGemmArguments const *arguments =
static_cast<SparseGemmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<SparseGemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<SparseGemmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<SparseGemmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
return op->run(stream);
}
void print_operator_args(OperatorArguments &operator_args) const {
#if 0
std::cout << "GemmSparseOperation::OperatorArguments" << std::endl;
std::cout << " problem_size: " << operator_args.problem_size.m() << ", "<< operator_args.problem_size.n() << "," << operator_args.problem_size.k() << std::endl;
std::cout << " alpha: " << operator_args.epilogue.alpha << std::endl;
std::cout << " alpha_ptr: " << operator_args.epilogue.alpha_ptr << std::endl;
std::cout << " beta: " << operator_args.epilogue.beta << std::endl;
std::cout << " beta_ptr: " << operator_args.epilogue.beta_ptr << std::endl;
std::cout << " ref_A.data(): " << operator_args.ref_A.data() << std::endl;
std::cout << " ref_A.stride: " << operator_args.ref_A.stride(0) << std::endl;
std::cout << " ref_B.data(): " << operator_args.ref_B.data() << std::endl;
std::cout << " ref_B.stride: " << operator_args.ref_B.stride(0) << std::endl;
std::cout << " ref_C.data(): " << operator_args.ref_C.data() << std::endl;
std::cout << " ref_C.stride: " << operator_args.ref_C.stride(0) << std::endl;
#endif
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmUniversalOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmUniversalOperation(char const *name = "unknown_gemm"):
GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kUniversal;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmUniversalConfiguration const *configuration) {
operator_args.mode = configuration->mode;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda = (configuration->lda);
operator_args.ldb = (configuration->ldb);
operator_args.ldc = (configuration->ldc);
operator_args.ldd = (configuration->ldd);
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmUniversalArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A = arguments->A;
operator_args.ptr_B = arguments->B;
operator_args.ptr_C = arguments->C;
operator_args.ptr_D = arguments->D;
operator_args.batch_stride_A = arguments->batch_stride_A;
operator_args.batch_stride_B = arguments->batch_stride_B;
operator_args.batch_stride_C = arguments->batch_stride_C;
operator_args.batch_stride_D = arguments->batch_stride_D;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmUniversalConfiguration const *configuration =
static_cast<GemmUniversalConfiguration const *>(configuration_ptr);
GemmUniversalArguments const *arguments =
static_cast<GemmUniversalArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmUniversalConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
status = update_arguments_(
args,
static_cast<GemmUniversalArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmUniversalConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmUniversalArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
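// A minimal usage sketch (illustrative only, not part of the library). It assumes `operation`
// points at a library::Operation backed by GemmUniversalOperation, and that `config` and
// `arguments` are a GemmUniversalConfiguration / GemmUniversalArguments pair populated by the
// caller; error handling is reduced to early returns.
#if 0
inline Status run_gemm_universal_example(
  Operation const *operation,
  GemmUniversalConfiguration const &config,
  GemmUniversalArguments const &arguments,
  cudaStream_t stream = nullptr) {
  // Check that the operation supports this problem before allocating anything.
  Status status = operation->can_implement(&config, &arguments);
  if (status != Status::kSuccess) {
    return status;
  }
  // Allocate host- and device-side workspaces of the sizes the operation reports.
  std::vector<char> host_workspace(operation->get_host_workspace_size(&config));
  void *device_workspace = nullptr;
  uint64_t device_workspace_size = operation->get_device_workspace_size(&config, &arguments);
  if (device_workspace_size &&
      cudaMalloc(&device_workspace, device_workspace_size) != cudaSuccess) {
    return Status::kErrorInternal;
  }
  // Construct the underlying kernel in the host workspace, then launch it on the given stream.
  status = operation->initialize(&config, host_workspace.data(), device_workspace, stream);
  if (status == Status::kSuccess) {
    status = operation->run(&arguments, host_workspace.data(), device_workspace, stream);
  }
  cudaFree(device_workspace);
  return status;
}
#endif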
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmPlanarComplexOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmPlanarComplexOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kPlanarComplex;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexConfiguration const *configuration) {
operator_args.mode = cutlass::gemm::GemmUniversalMode::kBatched;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda_real = configuration->lda_real;
operator_args.lda_imag = configuration->lda_imag;
operator_args.ldb_real = configuration->ldb_real;
operator_args.ldb_imag = configuration->ldb_imag;
operator_args.ldc_real = configuration->ldc_real;
operator_args.ldc_imag = configuration->ldc_imag;
operator_args.ldd_real = configuration->ldd_real;
operator_args.ldd_imag = configuration->ldd_imag;
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
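// In the planar complex representation, each operand stores its real and imaginary parts as two
// separate planes, which is why every tensor carries distinct *_real / *_imag pointers, leading
// dimensions, and batch strides rather than an interleaved complex element type.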
operator_args.ptr_A_real = arguments->A_real;
operator_args.ptr_A_imag = arguments->A_imag;
operator_args.ptr_B_real = arguments->B_real;
operator_args.ptr_B_imag = arguments->B_imag;
operator_args.ptr_C_real = arguments->C_real;
operator_args.ptr_C_imag = arguments->C_imag;
operator_args.ptr_D_real = arguments->D_real;
operator_args.ptr_D_imag = arguments->D_imag;
operator_args.batch_stride_A = arguments->batch_stride_A_real;
operator_args.batch_stride_A_imag = arguments->batch_stride_A_imag;
operator_args.batch_stride_B = arguments->batch_stride_B_real;
operator_args.batch_stride_B_imag = arguments->batch_stride_B_imag;
operator_args.batch_stride_C = arguments->batch_stride_C_real;
operator_args.batch_stride_C_imag = arguments->batch_stride_C_imag;
operator_args.batch_stride_D = arguments->batch_stride_D_real;
operator_args.batch_stride_D_imag = arguments->batch_stride_D_imag;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmPlanarComplexConfiguration const *configuration =
static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr);
GemmPlanarComplexArguments const *arguments =
static_cast<GemmPlanarComplexArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmPlanarComplexArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmPlanarComplexArrayOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmPlanarComplexArrayOperation(char const *name = "unknown_gemm"): GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kPlanarComplexArray;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexArrayConfiguration const *configuration) {
operator_args.mode = cutlass::gemm::GemmUniversalMode::kArray;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda_real = configuration->lda_real;
operator_args.lda_imag = configuration->lda_imag;
operator_args.ldb_real = configuration->ldb_real;
operator_args.ldb_imag = configuration->ldb_imag;
operator_args.ldc_real = configuration->ldc_real;
operator_args.ldc_imag = configuration->ldc_imag;
operator_args.ldd_real = configuration->ldd_real;
operator_args.ldd_imag = configuration->ldd_imag;
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
GemmPlanarComplexArrayArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
*static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<cutlass::complex<ElementCompute> const *>(arguments->alpha),
static_cast<cutlass::complex<ElementCompute> const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A_real = arguments->A_real;
operator_args.ptr_A_imag = arguments->A_imag;
operator_args.ptr_B_real = arguments->B_real;
operator_args.ptr_B_imag = arguments->B_imag;
operator_args.ptr_C_real = arguments->C_real;
operator_args.ptr_C_imag = arguments->C_imag;
operator_args.ptr_D_real = arguments->D_real;
operator_args.ptr_D_imag = arguments->D_imag;
operator_args.ptr_M = arguments->M;
operator_args.ptr_N = arguments->N;
operator_args.ptr_K = arguments->K;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmPlanarComplexArrayConfiguration const *configuration =
static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr);
GemmPlanarComplexArrayArguments const *arguments =
static_cast<GemmPlanarComplexArrayArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmPlanarComplexArrayConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmPlanarComplexArrayArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class GemmGroupedOperation : public GemmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementD = ElementC;
using LayoutD = LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
GemmGroupedOperation(char const *name = "unknown_gemm"):
GemmOperationBase<Operator_>(name) {
this->description_.gemm_kind = GemmKind::kGrouped;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &op_args,
GemmGroupedConfiguration const *config) {
op_args.problem_count = config->problem_count;
op_args.threadblock_count = config->threadblock_count;
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &op_args,
GemmGroupedArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
op_args.output_op = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice) {
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
op_args.output_op = params;
}
else {
return Status::kErrorInvalidProblem;
}
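// The remaining members are per-group arrays: problem_sizes holds one GEMM shape per group,
// ptr_A/B/C/D hold one operand pointer per group, and lda/ldb/ldc/ldd hold the corresponding
// per-group leading dimensions.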
op_args.problem_sizes = arguments->problem_sizes;
op_args.ptr_A = static_cast<ElementA **>(arguments->ptr_A);
op_args.ptr_B = static_cast<ElementB **>(arguments->ptr_B);
op_args.ptr_C = static_cast<ElementC **>(arguments->ptr_C);
op_args.ptr_D = static_cast<ElementD **>(arguments->ptr_D);
op_args.lda = arguments->lda;
op_args.ldb = arguments->ldb;
op_args.ldc = arguments->ldc;
op_args.ldd = arguments->ldd;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
GemmGroupedConfiguration const *configuration =
static_cast<GemmGroupedConfiguration const *>(configuration_ptr);
GemmGroupedArguments const *arguments =
static_cast<GemmGroupedArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmGroupedConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
status = update_arguments_(
args,
static_cast<GemmGroupedArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<GemmGroupedConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<GemmGroupedArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args);
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/gemm_operation.h/0 | {
"file_path": "tools/library/src/gemm_operation.h",
"repo_id": "tools",
"token_count": 14489
} | 66 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for all TRMM operation kinds in CUTLASS Library.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/trmm.h"
#include "cutlass/gemm/kernel/default_trmm_universal.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class TrmmOperationBase : public Operation {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
static SideMode const kSideMode = Operator::kSideMode;
static FillMode const kFillMode = Operator::kFillMode;
static DiagType const kDiagType = Operator::kDiagType;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
protected:
///
TrmmDescription description_;
public:
/// Constructor
TrmmOperationBase(char const *name = "unknown_trmm") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kTrmm;
description_.trmm_kind = TrmmKind::kUniversal;
description_.side_mode = kSideMode;
description_.fill_mode = kFillMode;
description_.diag_type = kDiagType;
description_.tile_description.threadblock_shape = make_Coord(
Operator::ThreadblockShape::kM,
Operator::ThreadblockShape::kN,
Operator::ThreadblockShape::kK);
description_.tile_description.threadblock_stages = Operator::kStages;
description_.tile_description.warp_count = make_Coord(
Operator::TrmmKernel::WarpCount::kM,
Operator::TrmmKernel::WarpCount::kN,
Operator::TrmmKernel::WarpCount::kK);
description_.tile_description.math_instruction.instruction_shape = make_Coord(
Operator::InstructionShape::kM,
Operator::InstructionShape::kN,
Operator::InstructionShape::kK);
description_.tile_description.math_instruction.element_accumulator =
NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class =
OpcodeClassMap<typename Operator::OperatorClass>::kId;
description_.tile_description.math_instruction.math_operation =
MathOperationMap<typename Operator::Operator>::kId;
description_.tile_description.minimum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin;
description_.tile_description.maximum_compute_capability =
ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax;
description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA);
description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB);
description_.D = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC);
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
description_.split_k_mode = SplitKMode::kNone;
description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId;
}
/// Returns the description of the TRMM operation
virtual OperationDescription const & description() const {
return description_;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class TrmmOperation : public TrmmOperationBase<Operator_> {
public:
using Operator = Operator_;
using ElementA = typename Operator::ElementA;
using LayoutA = typename Operator::LayoutA;
static SideMode const kSideMode = Operator::kSideMode;
static FillMode const kFillMode = Operator::kFillMode;
static DiagType const kDiagType = Operator::kDiagType;
using ElementB = typename Operator::ElementB;
using LayoutB = typename Operator::LayoutB;
using ElementC = typename Operator::ElementC;
using LayoutC = typename Operator::LayoutC;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
public:
/// Constructor
TrmmOperation(char const *name = "unknown_trmm"):
TrmmOperationBase<Operator_>(name) {
this->description_.trmm_kind = TrmmKind::kUniversal;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
TrmmConfiguration const *configuration) {
//operator_args.mode = configuration->mode;
operator_args.problem_size = configuration->problem_size;
operator_args.batch_count = configuration->batch_count;
operator_args.lda = int(configuration->lda);
operator_args.ldb = int(configuration->ldb);
operator_args.ldd = int(configuration->ldd);
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
TrmmArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::EpilogueOutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::EpilogueOutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.epilogue = params;
}
else {
return Status::kErrorInvalidProblem;
}
// update arguments
operator_args.ptr_A = arguments->A;
operator_args.ptr_B = arguments->B;
operator_args.batch_stride_A = arguments->batch_stride_A;
operator_args.batch_stride_B = arguments->batch_stride_B;
operator_args.ptr_D = arguments->D;
operator_args.batch_stride_D = arguments->batch_stride_D;
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
TrmmConfiguration const *configuration =
static_cast<TrmmConfiguration const *>(configuration_ptr);
TrmmArguments const *arguments =
static_cast<TrmmArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<TrmmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
uint64_t size = Operator::get_workspace_size(args);
return size;
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<TrmmConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
status = op->initialize(args, device_workspace, stream);
return status;
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<TrmmArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
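// For the (side mode, output layout) combinations below, the underlying operator expects its
// operands in swapped order, so the update uses the swapped argument set returned by
// Arguments::swapped_matrices().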
bool need_swapped_matrices = (kSideMode == SideMode::kLeft &&
std::is_same<typename Operator::LayoutC, layout::ColumnMajor>::value) ||
(kSideMode == SideMode::kRight &&
std::is_same<typename Operator::LayoutC, layout::RowMajor>::value);
if (need_swapped_matrices) {
status = op->update(args.swapped_matrices(), device_workspace);
} else {
status = op->update(args, device_workspace);
}
if (status != Status::kSuccess) {
return status;
}
status = op->run(stream);
return status;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/trmm_operation.h/0 | {
"file_path": "tools/library/src/trmm_operation.h",
"repo_id": "tools",
"token_count": 3752
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines the PerformanceReport class, which collects profiling results and writes CSV and jUnit reports.
*/
#pragma once
#include <vector>
#include <fstream>
// CUTLASS Profiler includes
#include "options.h"
#include "enumerated_types.h"
#include "performance_result.h"
// CUTLASS Library includes
#include "cutlass/library/library.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
class PerformanceReport {
private:
/// Reference to options
Options const &options_;
/// Operation kind
library::OperationKind op_kind_;
/// Operation file name containing performance report of op_kind
std::string op_file_name_;
/// Output file containing results
std::ofstream output_file_;
/// Operation file name containing junit performance report of op_kind
std::string op_junit_file_name_;
/// Output file containing junit results
std::ofstream junit_output_file_;
/// Flag indicating the performance report is valid
bool good_;
/// Vector of argument names
std::vector<std::string> argument_names_;
/// Counter uniquely identifying problem within the report
size_t problem_index_;
/// Collection of all results
PerformanceResultVector concatenated_results_;
public:
PerformanceReport(Options const &options, std::vector<std::string> const &argument_names, library::OperationKind const &op_kind);
~PerformanceReport();
bool good() const { return good_; }
void next_problem();
void append_result(PerformanceResult result);
void sort_results(PerformanceResultVector &results);
void append_results(PerformanceResultVector const &results);
public:
/// Prints the CSV header
std::ostream & print_csv_header_(std::ostream &out);
/// Prints the CSV
std::ostream & print_result_csv_(std::ostream &out, PerformanceResult const &result);
/// @defgroup jUnit Result Generation
/// Functions related to generation of the jUnit results
/// @{
std::ostream & print_junit_header_(std::ostream &out);
std::ostream & print_junit_result_(std::ostream &out, PerformanceResult const &result);
std::ostream & print_junit_footer_(std::ostream &out);
/// @}
/// Prints the result in human readable form
std::ostream & print_result_pretty_(
std::ostream &out,
PerformanceResult const &result,
bool use_shell_coloring = true);
};
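// A usage sketch (illustrative only, not part of the profiler). It assumes `options`,
// `argument_names`, and `op_kind` are already configured, that one PerformanceResult is appended
// per profiled problem, and that the report flushes its CSV / jUnit output as it is populated and
// destroyed.
#if 0
inline void report_example(
  Options const &options,
  std::vector<std::string> const &argument_names,
  library::OperationKind op_kind,
  std::vector<PerformanceResult> const &results) {
  PerformanceReport report(options, argument_names, op_kind);
  for (PerformanceResult const &result : results) {
    report.append_result(result);  // record the result for the current problem
    report.next_problem();         // advance to the next problem index
  }
}
#endif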
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/performance_report.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/performance_report.h",
"repo_id": "tools",
"token_count": 1135
} | 68 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Provides conversions between the profiler's enumerated types and strings.
*/
#include "cutlass/profiler/enumerated_types.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ExecutionMode enumerant;
}
ExecutionMode_enumerants[] = {
{"profile", "Profile", ExecutionMode::kProfile},
{"dry_run", "Dry run", ExecutionMode::kDryRun},
{"dry", "dry run", ExecutionMode::kDryRun},
{"trace", "Trace", ExecutionMode::kTrace},
{"enumerate", "Enumerate", ExecutionMode::kEnumerate}
};
/// Converts an ExecutionMode enumerant to a string
char const *to_string(ExecutionMode mode, bool pretty) {
for (auto const & possible : ExecutionMode_enumerants) {
if (mode == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses an ExecutionMode enumerant from a string
template <>
ExecutionMode from_string<ExecutionMode>(std::string const &str) {
for (auto const & possible : ExecutionMode_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ExecutionMode::kInvalid;
}
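// For example (illustrative): from_string<ExecutionMode>("dry_run") and
// from_string<ExecutionMode>("dry") both yield ExecutionMode::kDryRun, while
// to_string(ExecutionMode::kDryRun, false) returns "dry_run" because the first matching table
// entry wins; unrecognized strings map to ExecutionMode::kInvalid.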
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
AlgorithmMode enumerant;
}
AlgorithmMode_enumerants[] = {
{"matching", "Matching", AlgorithmMode::kMatching},
{"best", "Best", AlgorithmMode::kBest},
{"default", "Default", AlgorithmMode::kDefault}
};
/// Converts an AlgorithmMode enumerant to a string
char const *to_string(AlgorithmMode mode, bool pretty) {
for (auto const & possible : AlgorithmMode_enumerants) {
if (mode == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses an AlgorithmMode enumerant from a string
template <>
AlgorithmMode from_string<AlgorithmMode>(std::string const &str) {
for (auto const & possible : AlgorithmMode_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return AlgorithmMode::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
Disposition enumerant;
}
Disposition_enumerants[] = {
{"passed", "Passed", Disposition::kPassed},
{"failed", "Failed", Disposition::kFailed},
{"not_run", "Not run", Disposition::kNotRun},
{"not_verified", "Not verified", Disposition::kNotVerified},
{"invalid_problem", "Invalid problem", Disposition::kInvalidProblem},
{"not_supported", "Not supported", Disposition::kNotSupported},
{"incorrect", "Incorrect", Disposition::kIncorrect}
};
/// Converts a Disposition enumerant to a string
char const *to_string(Disposition disposition, bool pretty) {
for (auto const & possible : Disposition_enumerants) {
if (disposition == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a Disposition enumerant from a string
template <>
Disposition from_string<Disposition>(std::string const &str) {
for (auto const & possible : Disposition_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return Disposition::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
SaveWorkspace enumerant;
}
SaveWorkspace_enumerants[] = {
{"never", "Never", SaveWorkspace::kNever},
{"incorrect", "Incorrect", SaveWorkspace::kIncorrect},
{"always", "Always", SaveWorkspace::kAlways}
};
/// Converts a SaveWorkspace enumerant to a string
char const *to_string(SaveWorkspace save_option, bool pretty) {
for (auto const & possible : SaveWorkspace_enumerants) {
if (save_option == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses a SaveWorkspace enumerant from a string
template <>
SaveWorkspace from_string<SaveWorkspace>(std::string const &str) {
for (auto const & possible : SaveWorkspace_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return SaveWorkspace::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static struct {
char const *text;
char const *pretty;
ArgumentTypeID enumerant;
}
ArgumentTypeID_enumerants[] = {
{"scalar", "Scalar", ArgumentTypeID::kScalar},
{"int", "Integer", ArgumentTypeID::kInteger},
{"tensor", "Tensor", ArgumentTypeID::kTensor},
{"batched_tensor", "BatchedTensor", ArgumentTypeID::kBatchedTensor},
{"struct", "Struct", ArgumentTypeID::kStructure},
{"enum", "Enumerated type", ArgumentTypeID::kEnumerated}
};
/// Converts an ArgumentTypeID enumerant to a string
char const *to_string(ArgumentTypeID type, bool pretty) {
for (auto const & possible : ArgumentTypeID_enumerants) {
if (type == possible.enumerant) {
if (pretty) {
return possible.pretty;
}
else {
return possible.text;
}
}
}
return pretty ? "Invalid" : "invalid";
}
/// Parses an ArgumentTypeID enumerant from a string
template <>
ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str) {
for (auto const & possible : ArgumentTypeID_enumerants) {
if ((str.compare(possible.text) == 0) ||
(str.compare(possible.pretty) == 0)) {
return possible.enumerant;
}
}
return ArgumentTypeID::kInvalid;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/enumerated_types.cpp/0 | {
"file_path": "tools/profiler/src/enumerated_types.cpp",
"repo_id": "tools",
"token_count": 2582
} | 69 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* Utility for parsing command line arguments
*/
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"
namespace cutlass {
/******************************************************************************
* command_line
******************************************************************************/
/**
* Utility for parsing command line arguments
*/
struct CommandLine {
std::vector<std::string> keys;
std::vector<std::string> values;
std::vector<std::string> args;
/**
* Constructor
*/
CommandLine(int argc, const char** argv) {
using namespace std;
for (int i = 1; i < argc; i++) {
string arg = argv[i];
if ((arg[0] != '-') || (arg[1] != '-')) {
args.push_back(arg);
continue;
}
string::size_type pos;
string key, val;
if ((pos = arg.find('=')) == string::npos) {
key = string(arg, 2, arg.length() - 2);
val = "";
} else {
key = string(arg, 2, pos - 2);
val = string(arg, pos + 1, arg.length() - 1);
}
keys.push_back(key);
values.push_back(val);
}
}
/**
* Checks whether a flag "--<flag>" is present in the commandline
*/
bool check_cmd_line_flag(const char* arg_name) const {
using namespace std;
for (int i = 0; i < int(keys.size()); ++i) {
if (keys[i] == string(arg_name)) return true;
}
return false;
}
/**
* Returns number of naked (non-flag and non-key-value) commandline parameters
*/
size_t num_naked_args() const {
return args.size();
}
/**
* Print naked (non-flag and non-key-value) commandline parameters
*/
void print_naked_args(std::ostream &out) const {
for (auto arg : args) {
out << " " << arg <<"\n";
}
}
/**
* Returns the commandline parameter for a given index (not including flags)
*/
template <typename value_t>
void get_cmd_line_argument(size_t index, value_t& val) const {
using namespace std;
if (index < args.size()) {
istringstream str_stream(args[index]);
str_stream >> val;
}
}
/**
* Obtains the boolean value specified for a given commandline parameter --<flag>=<bool>
*/
void get_cmd_line_argument(const char* arg_name, bool& val, bool _default) const {
val = _default;
if (check_cmd_line_flag(arg_name)) {
std::string value;
get_cmd_line_argument(arg_name, value);
val = !(value == "0" || value == "false");
}
}
/**
* Obtains the value specified for a given commandline parameter --<flag>=<value>
*/
template <typename value_t>
void get_cmd_line_argument(const char* arg_name,
value_t& val) const {
get_cmd_line_argument(arg_name, val, val);
}
/**
* Obtains the value specified for a given commandline parameter --<flag>=<value>
*/
template <typename value_t>
void get_cmd_line_argument(const char* arg_name,
value_t& val,
value_t const& _default) const {
using namespace std;
val = _default;
for (int i = 0; i < int(keys.size()); ++i) {
if (keys[i] == string(arg_name)) {
istringstream str_stream(values[i]);
str_stream >> val;
}
}
}
/**
* Returns the values specified for a given commandline parameter --<flag>=<value>,<value>*
*/
template <typename value_t>
void get_cmd_line_arguments(const char* arg_name,
std::vector<value_t>& vals,
char sep = ',') const {
using namespace std;
if (check_cmd_line_flag(arg_name)) {
// Clear any default values
vals.clear();
// Recover from multi-value string
for (size_t i = 0; i < keys.size(); ++i) {
if (keys[i] == string(arg_name)) {
string val_string(values[i]);
separate_string(val_string, vals, sep);
}
}
}
}
/**
* Returns the key:value pairs specified for a given commandline parameter
* --<flag>=<key:value>[,<key:value>...]
*/
void get_cmd_line_argument_pairs(const char* arg_name,
std::vector<std::pair<std::string, std::string> >& tokens,
char delim = ',',
char sep = ':') const {
if (check_cmd_line_flag(arg_name)) {
std::string value;
get_cmd_line_argument(arg_name, value);
tokenize(tokens, value, delim, sep);
}
}
/**
* Returns a list of ranges specified for a given commandline parameter
* --<flag>=<v1:v2:...>[,<v1:v2:...>...]
*/
void get_cmd_line_argument_ranges(const char* arg_name,
std::vector<std::vector<std::string> >& vals,
char delim = ',',
char sep = ':') const {
std::vector<std::string> ranges;
get_cmd_line_arguments(arg_name, ranges, delim);
for (std::vector<std::string>::const_iterator range = ranges.begin();
range != ranges.end(); ++range) {
std::vector<std::string> range_vals;
separate_string(*range, range_vals, sep);
vals.push_back(range_vals);
}
}
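// For example (illustrative): --mnk=128:128:64,256:256:64 parsed with the default delimiters
// above yields { {"128", "128", "64"}, {"256", "256", "64"} }.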
/**
* The number of pairs parsed
*/
int parsed_argc() const { return (int)keys.size(); }
//-------------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------------
/// Tokenizes a comma-delimited list of string pairs delimited by ':'
static void tokenize(std::vector<std::pair<std::string, std::string> >& tokens,
std::string const& str,
char delim = ',',
char sep = ':') {
// Home-built to avoid Boost dependency
size_t s_idx = 0;
size_t d_idx = std::string::npos;
while (s_idx < str.size()) {
d_idx = str.find_first_of(delim, s_idx);
size_t end_idx = (d_idx != std::string::npos ? d_idx : str.size());
size_t sep_idx = str.find_first_of(sep, s_idx);
size_t offset = 1;
if (sep_idx == std::string::npos || sep_idx >= end_idx) {
sep_idx = end_idx;
offset = 0;
}
std::pair<std::string, std::string> item(
str.substr(s_idx, sep_idx - s_idx),
str.substr(sep_idx + offset, end_idx - sep_idx - offset));
tokens.push_back(item);
s_idx = end_idx + 1;
}
}
  /// Tokenizes a comma-delimited list of ':'-delimited string pairs, keeping only the first
  /// element of each pair
static void tokenize(std::vector<std::string>& tokens,
std::string const& str,
char delim = ',',
char sep = ':') {
typedef std::vector<std::pair<std::string, std::string> > TokenVector;
typedef TokenVector::const_iterator token_iterator;
std::vector<std::pair<std::string, std::string> > token_pairs;
tokenize(token_pairs, str, delim, sep);
for (token_iterator tok = token_pairs.begin(); tok != token_pairs.end(); ++tok) {
tokens.push_back(tok->first);
}
}
template <typename value_t>
static void separate_string(std::string const& str,
std::vector<value_t>& vals,
char sep = ',') {
std::istringstream str_stream(str);
std::string::size_type old_pos = 0;
std::string::size_type new_pos = 0;
// Iterate <sep>-delimited values
value_t val;
while ((new_pos = str.find(sep, old_pos)) != std::string::npos) {
if (new_pos != old_pos) {
str_stream.width(new_pos - old_pos);
str_stream >> val;
vals.push_back(val);
}
// skip over delimiter
str_stream.ignore(1);
old_pos = new_pos + 1;
}
// Read last value
str_stream >> val;
vals.push_back(val);
}
};
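
// ---------------------------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). Shows how a host program
// might drive the parser above. The flag names ("m", "alpha", "verify", "tiles") are
// hypothetical, and the surrounding class is assumed to be named CommandLine with a
// (argc, argv) constructor, as in the CUTLASS utility.
// ---------------------------------------------------------------------------------------------
inline void example_command_line_usage(int argc, const char** argv) {
  CommandLine cmd(argc, argv);

  // Scalar flags keep their preset defaults when absent on the command line.
  int m = 128;
  cmd.get_cmd_line_argument("m", m);

  float alpha = 1.0f;
  cmd.get_cmd_line_argument("alpha", alpha);

  // Boolean flags: "--verify", "--verify=true", and "--verify=false" are all accepted.
  bool verify = false;
  cmd.get_cmd_line_argument("verify", verify, false);

  // Comma-separated lists, e.g. "--tiles=64,128,256".
  std::vector<int> tiles;
  cmd.get_cmd_line_arguments("tiles", tiles);

  (void)m; (void)alpha; (void)verify; (void)tiles;
}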
} // namespace cutlass
| tools/util/include/cutlass/util/command_line.h/0 | {
"file_path": "tools/util/include/cutlass/util/command_line.h",
"repo_id": "tools",
"token_count": 3938
} | 70 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cuda.h>
#include <cute/util/debug.hpp>
namespace cute
{
void
device_init(int device_id, bool quiet = false)
{
cudaDeviceProp device_prop;
std::size_t device_free_physmem;
std::size_t device_total_physmem;
CUTE_CHECK_ERROR(cudaSetDevice(device_id));
CUTE_CHECK_ERROR(cudaMemGetInfo(&device_free_physmem, &device_total_physmem));
CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id));
if (device_prop.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
//float device_giga_bandwidth = float(device_prop.memoryBusWidth) * device_prop.memoryClockRate * 2 / 8 / 1000 / 1000;
if (!quiet) {
printf("Using device %d: %s (SM%d, %d SMs)\n",
device_id, device_prop.name,
device_prop.major * 10 + device_prop.minor,
device_prop.multiProcessorCount);
fflush(stdout);
}
}
/**
* Convert the SM version (e.g. v7.0, v7.5) to the physical number of cores.
*/
inline int
_ConvertSMVer2Cores(int major, int minor)
{
  // Defines for GPU architecture types (using the SM version to determine
  // the number of cores per SM)
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If the SM version is not found in the table, fall back to the last known
  // entry so execution can continue
printf("MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
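
/**
 * Illustrative helper (not part of the original header): estimates peak single-precision
 * throughput from the per-SM core count above. Assumes one FMA (2 FLOPs) per core per clock
 * and that cudaDeviceProp::clockRate reports kHz; intended only as a rough upper bound.
 */
inline double
estimate_peak_sp_gflops(cudaDeviceProp const& prop)
{
  double cores     = double(_ConvertSMVer2Cores(prop.major, prop.minor)) * prop.multiProcessorCount;
  double clock_ghz = double(prop.clockRate) * 1.0e-6;   // kHz -> GHz
  return cores * clock_ghz * 2.0;                       // 2 FLOPs per FMA per clock
}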
} // end namespace cute
| tools/util/include/cutlass/util/helper_cuda.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/helper_cuda.hpp",
"repo_id": "tools",
"token_count": 1388
} | 71 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <curand_kernel.h>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize tensor to uniform random distribution
template <typename T>
__global__ void TensorInitializeUniform(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
double range = dist.uniform.max - dist.uniform.min;
double rnd = curand_uniform(&rng_state[threadIdx.x]);
rnd = dist.uniform.min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (dist.int_scale >= 0) {
rnd = double(int(rnd * double(1 << dist.int_scale)));
*tensor = T(rnd / double(1 << dist.int_scale));
} else {
*tensor = T(rnd);
}
tensor += ldm;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize tensor to a Gaussian (normal) distribution
template <typename T>
__global__ void TensorInitializeGaussian(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
double rnd = curand_normal(&rng_state[threadIdx.x]);
rnd = dist.gaussian.mean + dist.gaussian.stddev * rnd;
if (dist.int_scale >= 0) {
rnd = double(int(rnd * double(1 << dist.int_scale)));
*tensor = T(rnd / double(1 << dist.int_scale));
} else {
*tensor = T(rnd);
}
}
}
}
/// Kernel to initialize tensor elements to a linear combination of their coordinates
template <typename T>
__global__ void TensorInitializeLinear(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
*tensor =
dist.linear.offset + dist.linear.delta_row * c_idx + dist.linear.delta_column * s_idx;
}
}
}
/// Kernel to initialize tensor to an identity matrix
template <typename T>
__global__ void TensorInitializeIdentity(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
*tensor = (c_idx == s_idx ? T(1) : T(0));
}
}
}
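
/// Illustrative host-side launch sketch (not part of the original header). Each thread owns one
/// contiguous index and a block covers blockDim.x strided rows, so the grid is sized accordingly.
/// The block size must not exceed 1024, the extent of the shared curandState_t array above.
template <typename T>
void example_launch_tensor_initialize_uniform(
  Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
  dim3 block(256, 1, 1);
  dim3 grid(
    (dim_contiguous + block.x - 1) / block.x,   // one thread per contiguous element
    (dim_strided + block.x - 1) / block.x,      // each block covers block.x strided rows
    1);
  TensorInitializeUniform<T><<< grid, block >>>(dist, seed, dim_contiguous, dim_strided, tensor, ldm);
}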
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h",
"repo_id": "tools",
"token_count": 2183
} | 72 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for Rank 2k update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a rank-2k update (D = alpha * (A * B^T + B * A^T) + beta * C) among matrices
/// (tensors of rank=2) pointed to by TensorRef objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_rank2k(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2,
"Tensors must be of rank 2");
static_assert(
FillModeC == FillMode::kLower ||
FillModeC == FillMode::kUpper,
"Fill Mode can either be Lower or Upper.");
using CompareOp = typename platform::conditional<(FillModeC == FillMode::kLower),
std::greater_equal<int>,
std::less_equal<int>>::type;
// Note: batch is ignored.
// Note: M is same as N for Rank 2k update
int const N = problem_size.n();
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp compare_op;
for (int row_block = 0; row_block < N; row_block += Nblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Nblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < N && col < N && compare_op(row, col))
{
// A x B^T
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b_t = tensor_b.at(MatrixCoord(col, k_block));
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b_t(cast_if_scalar<ComputeType>(b_t));
accum[i][j] = inner_product_op(compute_a, compute_b_t, accum[i][j]);
// B x A^T
ElementB b = tensor_b.at(MatrixCoord(row, k_block));
ElementA a_t = tensor_a.at(MatrixCoord(col, k_block));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
ComputeType compute_a_t(cast_if_scalar<ComputeType>(a_t));
accum[i][j] = inner_product_op(compute_b, compute_a_t, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < N && col < N &&
( (FillModeC == FillMode::kLower && row >= col) ||
(FillModeC == FillMode::kUpper && row <= col) )
) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general Rank 2k update (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_rank2k(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Rank2K;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC, FillMode FillModeC,
typename ScalarType, typename ComputeType>
struct Rank2K<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
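
/// Illustrative usage sketch (not part of the original header). Computes the lower-triangular
/// update D = alpha * (A * B^T + B * A^T) + beta * C with the functor defined above; tensor
/// allocation and initialization are assumed to happen elsewhere.
inline void example_rank2k_reference(
  gemm::GemmCoord problem_size,                      // m == n for a rank-2k update
  TensorRef<float, layout::ColumnMajor> tensor_a,
  TensorRef<float, layout::ColumnMajor> tensor_b,
  TensorRef<float, layout::ColumnMajor> tensor_c,
  TensorRef<float, layout::ColumnMajor> tensor_d) {

  Rank2K<
    float, layout::ColumnMajor,   // ElementA, LayoutA
    float, layout::ColumnMajor,   // ElementB, LayoutB
    float, layout::ColumnMajor,   // ElementC, LayoutC
    FillMode::kLower,             // only the lower triangle of D is written
    float,                        // ScalarType
    float                         // ComputeType
  > rank2k_op;

  rank2k_op(problem_size, 1.0f, tensor_a, tensor_b, 0.0f, tensor_c, tensor_d);
}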
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/rank_2k.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/rank_2k.h",
"repo_id": "tools",
"token_count": 3649
} | 73 |
get_filename_component(NvidiaCutlass_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
include(CMakeFindDependencyMacro)
if(TARGET nvidia::cutlass::CUTLASS)
return()
endif()
include("${NvidiaCutlass_CMAKE_DIR}/NvidiaCutlassTargets.cmake")
| cmake/NvidiaCutlassConfig.cmake.in/0 | {
"file_path": "cmake/NvidiaCutlassConfig.cmake.in",
"repo_id": "cmake",
"token_count": 97
} | 0 |
var searchData=
[
['quiet_5fnan',['quiet_NaN',['../structstd_1_1numeric__limits_3_01cutlass_1_1half__t_01_4.html#a8c7eafdd3b121353c0914dc6e1c0d108',1,'std::numeric_limits< cutlass::half_t >']]]
];
| docs/search/all_10.js/0 | {
"file_path": "docs/search/all_10.js",
"repo_id": "docs",
"token_count": 103
} | 1 |
var searchData=
[
['yes',['yes',['../structcutlass_1_1platform_1_1is__base__of__helper.html#ac1cf3f804e7686213fd42c678cc6d669',1,'cutlass::platform::is_base_of_helper']]]
];
| docs/search/all_18.js/0 | {
"file_path": "docs/search/all_18.js",
"repo_id": "docs",
"token_count": 79
} | 2 |
var searchData=
[
['arch',['arch',['../namespacecutlass_1_1arch.html',1,'cutlass']]],
['cutlass',['cutlass',['../namespacecutlass.html',1,'']]],
['debug',['debug',['../namespacecutlass_1_1debug.html',1,'cutlass']]],
['detail',['detail',['../namespacecutlass_1_1detail.html',1,'cutlass']]],
['detail',['detail',['../namespacecutlass_1_1gemm_1_1thread_1_1detail.html',1,'cutlass::gemm::thread']]],
['detail',['detail',['../namespacecutlass_1_1reference_1_1host_1_1detail.html',1,'cutlass::reference::host']]],
['detail',['detail',['../namespacecutlass_1_1epilogue_1_1threadblock_1_1detail.html',1,'cutlass::epilogue::threadblock']]],
['detail',['detail',['../namespacecutlass_1_1gemm_1_1threadblock_1_1detail.html',1,'cutlass::gemm::threadblock']]],
['detail',['detail',['../namespacecutlass_1_1reference_1_1detail.html',1,'cutlass::reference']]],
['detail',['detail',['../namespacecutlass_1_1gemm_1_1kernel_1_1detail.html',1,'cutlass::gemm::kernel']]],
['detail',['detail',['../namespacecutlass_1_1reference_1_1device_1_1detail.html',1,'cutlass::reference::device']]],
['detail',['detail',['../namespacecutlass_1_1reference_1_1device_1_1kernel_1_1detail.html',1,'cutlass::reference::device::kernel']]],
['device',['device',['../namespacecutlass_1_1gemm_1_1device.html',1,'cutlass::gemm']]],
['device',['device',['../namespacecutlass_1_1reference_1_1device.html',1,'cutlass::reference']]],
['device_5fmemory',['device_memory',['../namespacecutlass_1_1device__memory.html',1,'cutlass']]],
['epilogue',['epilogue',['../namespacecutlass_1_1epilogue.html',1,'cutlass']]],
['gemm',['gemm',['../namespacecutlass_1_1gemm.html',1,'cutlass']]],
['host',['host',['../namespacecutlass_1_1reference_1_1host.html',1,'cutlass::reference']]],
['kernel',['kernel',['../namespacecutlass_1_1reduction_1_1kernel.html',1,'cutlass::reduction']]],
['kernel',['kernel',['../namespacecutlass_1_1gemm_1_1kernel.html',1,'cutlass::gemm']]],
['kernel',['kernel',['../namespacecutlass_1_1reference_1_1device_1_1kernel.html',1,'cutlass::reference::device']]],
['layout',['layout',['../namespacecutlass_1_1layout.html',1,'cutlass']]],
['library',['library',['../namespacecutlass_1_1library.html',1,'cutlass']]],
['platform',['platform',['../namespacecutlass_1_1platform.html',1,'cutlass']]],
['reduction',['reduction',['../namespacecutlass_1_1reduction.html',1,'cutlass']]],
['reference',['reference',['../namespacecutlass_1_1reference.html',1,'cutlass']]],
['thread',['thread',['../namespacecutlass_1_1gemm_1_1thread.html',1,'cutlass::gemm']]],
['thread',['thread',['../namespacecutlass_1_1reference_1_1device_1_1thread.html',1,'cutlass::reference::device']]],
['thread',['thread',['../namespacecutlass_1_1thread.html',1,'cutlass']]],
['thread',['thread',['../namespacecutlass_1_1reduction_1_1thread.html',1,'cutlass::reduction']]],
['thread',['thread',['../namespacecutlass_1_1epilogue_1_1thread.html',1,'cutlass::epilogue']]],
['thread',['thread',['../namespacecutlass_1_1transform_1_1thread.html',1,'cutlass::transform']]],
['threadblock',['threadblock',['../namespacecutlass_1_1epilogue_1_1threadblock.html',1,'cutlass::epilogue']]],
['threadblock',['threadblock',['../namespacecutlass_1_1gemm_1_1threadblock.html',1,'cutlass::gemm']]],
['threadblock',['threadblock',['../namespacecutlass_1_1transform_1_1threadblock.html',1,'cutlass::transform']]],
['transform',['transform',['../namespacecutlass_1_1transform.html',1,'cutlass']]],
['warp',['warp',['../namespacecutlass_1_1epilogue_1_1warp.html',1,'cutlass::epilogue']]],
['warp',['warp',['../namespacecutlass_1_1gemm_1_1warp.html',1,'cutlass::gemm']]]
];
| docs/search/namespaces_0.js/0 | {
"file_path": "docs/search/namespaces_0.js",
"repo_id": "docs",
"token_count": 1418
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to use the PredicatedTileIterator in CUTLASS to load data from
addressable memory, and then store it back into addressable memory.
TileIterator is a core concept in CUTLASS that enables efficient loading and storing of data to
  and from addressable memory. The PredicatedTileIterator accepts a ThreadMap type, which defines
the mapping of threads to a "tile" in memory. This separation of concerns enables user-defined
thread mappings to be specified.
In this example, a PredicatedTileIterator is used to load elements from a tile in global memory,
stored in column-major layout, into a fragment and then back into global memory in the same
layout.
This example uses CUTLASS utilities to ease the matrix operations.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS includes
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::reference::host::TensorFill() and
// cutlass::reference::host::TensorFillBlockSequential()
#include "cutlass/util/reference/host/tensor_fill.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define PredicatedTileIterators to load and store an M-by-K tile, in column-major layout.
template <typename Iterator>
__global__ void copy(
typename Iterator::Params dst_params,
typename Iterator::Element *dst_pointer,
typename Iterator::Params src_params,
typename Iterator::Element *src_pointer,
cutlass::Coord<2> extent) {
Iterator dst_iterator(dst_params, dst_pointer, extent, threadIdx.x);
Iterator src_iterator(src_params, src_pointer, extent, threadIdx.x);
// PredicatedTileIterator uses PitchLinear layout and therefore takes in a PitchLinearShape.
// The contiguous dimension can be accessed via Iterator::Shape::kContiguous and the strided
// dimension can be accessed via Iterator::Shape::kStrided
int iterations = (extent[1] + Iterator::Shape::kStrided - 1) / Iterator::Shape::kStrided;
typename Iterator::Fragment fragment;
for(size_t i = 0; i < fragment.size(); ++i) {
fragment[i] = 0;
}
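  // The first load/store pair is peeled out of the loop below. Accesses that fall outside
  // 'extent' are masked by the iterator's predicates, so partial tiles at the tensor boundary
  // are handled without explicit bounds checks here.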
src_iterator.load(fragment);
dst_iterator.store(fragment);
++src_iterator;
++dst_iterator;
for(; iterations > 1; --iterations) {
src_iterator.load(fragment);
dst_iterator.store(fragment);
++src_iterator;
++dst_iterator;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Initializes the source tile with sequentially increasing values and performs the copy into
// the destination tile using two PredicatedTileIterators, one to load the data from addressable
// memory into a fragment (register-backed array of elements owned by each thread) and another to
// store the data from the fragment back into the addressable memory of the destination tile.
cudaError_t TestTileIterator(int M, int K) {
  // For this example, we chose a <64, 4> tile shape. The PredicatedTileIterator expects
// PitchLinearShape and PitchLinear layout.
using Shape = cutlass::layout::PitchLinearShape<64, 4>;
using Layout = cutlass::layout::PitchLinear;
using Element = int;
int const kThreads = 32;
// ThreadMaps define how threads are mapped to a given tile. The PitchLinearStripminedThreadMap
// stripmines a pitch-linear tile among a given number of threads, first along the contiguous
// dimension then along the strided dimension.
using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<Shape, kThreads>;
  // Define the PredicatedTileIterator, using the Shape, Element, Layout, and ThreadMap types
using Iterator = cutlass::transform::threadblock::PredicatedTileIterator<
Shape, Element, Layout, 1, ThreadMap>;
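  // The '1' template argument is the advance rank: operator++ moves the iterator along the
  // strided dimension, matching the loop over strided tiles in the copy kernel above.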
cutlass::Coord<2> copy_extent = cutlass::make_Coord(M, K);
cutlass::Coord<2> alloc_extent = cutlass::make_Coord(M, K);
// Allocate source and destination tensors
cutlass::HostTensor<Element, Layout> src_tensor(alloc_extent);
cutlass::HostTensor<Element, Layout> dst_tensor(alloc_extent);
Element oob_value = Element(-1);
// Initialize destination tensor with all -1s
cutlass::reference::host::TensorFill(dst_tensor.host_view(), oob_value);
// Initialize source tensor with sequentially increasing values
cutlass::reference::host::BlockFillSequential(src_tensor.host_data(), src_tensor.capacity());
dst_tensor.sync_device();
src_tensor.sync_device();
typename Iterator::Params dst_params(dst_tensor.layout());
typename Iterator::Params src_params(src_tensor.layout());
dim3 block(kThreads, 1);
dim3 grid(1, 1);
// Launch copy kernel to perform the copy
copy<Iterator><<< grid, block >>>(
dst_params,
dst_tensor.device_data(),
src_params,
src_tensor.device_data(),
copy_extent
);
cudaError_t result = cudaGetLastError();
if(result != cudaSuccess) {
std::cerr << "Error - kernel failed." << std::endl;
return result;
}
dst_tensor.sync_host();
// Verify results
for(int s = 0; s < alloc_extent[1]; ++s) {
for(int c = 0; c < alloc_extent[0]; ++c) {
Element expected = Element(0);
if(c < copy_extent[0] && s < copy_extent[1]) {
expected = src_tensor.at({c, s});
}
else {
expected = oob_value;
}
Element got = dst_tensor.at({c, s});
bool equal = (expected == got);
if(!equal) {
std::cerr << "Error - source tile differs from destination tile." << std::endl;
return cudaErrorUnknown;
}
}
}
return cudaSuccess;
}
int main(int argc, const char *arg[]) {
cudaError_t result = TestTileIterator(57, 35);
if(result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit
return result == cudaSuccess ? 0 : -1;
}
| examples/04_tile_iterator/tile_iterator.cu/0 | {
"file_path": "examples/04_tile_iterator/tile_iterator.cu",
"repo_id": "examples",
"token_count": 2658
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  This example demonstrates fusing a per-row bias addition and a ReLU activation into a CUTLASS
  GEMM by using the LinearCombinationRelu epilogue with NoBetaScaling.
*/
#include <algorithm>
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// Note that if the output is column major, the bias has to be per row, i.e. every row has a
// different bias. If the output is row major, the bias has to be per column, i.e. every column
// has a different bias. Some additional notes:
//
// This example only works for ColumnMajor output because
//  1) only a row-major epilogue is available.
//  2) when the output is column major, A and B are swapped internally so the row-major
//     epilogue can still be used.
//  3) the Mx1 bias vector becomes 1xM after the swap/transpose.
//  4) the existing OutputIterator can then load the 1xM bias vector.
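// Illustrative shape note: with the problem below (M=5120, N=4096), the bias is a 5120x1
// vector, and every column of the 5120x4096 output D receives the same per-row bias before
// the ReLU is applied.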
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- identity swizzle: threadblock indices map directly to output tile coordinates
// Define the epilogue operation as LinearCombinationRelu. With NoBetaScaling this computes,
// elementwise,
//
//    d_ij = max(0, alpha * sum_k(a_ik * b_kj) + bias_i)
//
// where the per-row bias is broadcast across the columns of the output.
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per
// vectorized memory access. For half
// precision, it's 8 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue, // <- data type for alpha in linear combination function
    cutlass::epilogue::thread::ScaleType::NoBetaScaling>;  // <- D = ReLU(alpha * Accum + bias); beta is not applied
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run() {
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias(
{problem_size.m(), 1}); // <- Create matrix C with dimensions M x 1
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{
problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
{tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM
// to project away the N dimension by setting the stride to zero.
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha}, // <- alpha
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference gemm kernel
//
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device_reference;
// Launch device reference to compute strictly the product A * B
gemm_device_reference(
problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
0,
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < problem_size.n(); ++j) {
tensor_ref_d.at({i, j}) = std::max(
ElementOutput(0),
ElementOutput(tensor_ref_d.at({i, j}) + tensor_c_bias.at({i, 0}))
);
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return 0;
}
int main() {
bool notSupported = false;
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
  // CUTLASS must be compiled with the CUDA 10.2 Toolkit or later to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!(props.major * 10 + props.minor >= 75)) {
std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Return zero so this test passes on older toolkits; in that case the program is a no-op.
return 0;
}
return run();
}
| examples/12_gemm_bias_relu/gemm_bias_relu.cu/0 | {
"file_path": "examples/12_gemm_bias_relu/gemm_bias_relu.cu",
"repo_id": "examples",
"token_count": 5210
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "device/b2b_implicit_gemm_convolution.h"
#include "b2b_interleaved_conv2d_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
cutlass::conv::Conv2dProblemSize conv2d_s8_sm80_problem_size_0 (
{32, 56, 56, 64}, // input size (NHWC)
{64, 3, 3, 64}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{32, 56, 56, 64} // output size (NPQK)
);
cutlass::conv::Conv2dProblemSize conv2d_s8_sm80_problem_size_1 (
{32, 56, 56, 64}, // input size (NHWC)
{128, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{32, 56, 56, 128} // output size (NPQK)
);
bool run_nonfused_conv2d_fprop_optimized_s8_sm80() {
using ElementA = int8_t;
using ElementB = int8_t;
using ElementC = int8_t;
using ElementAccumulator = int32_t;
using ElementCompute = float;
ElementCompute alpha0 = ElementCompute(1);
ElementCompute beta0 = ElementCompute(1); //beta=1 for bias
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta=1 for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape0 = cutlass::gemm::GemmShape<64, 64, 64>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape1 = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
using Conv2dFpropKernel0 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementA, cutlass::layout::TensorNCxHWx<32>,
ElementB, cutlass::layout::TensorCxRSKx<32>,
ElementC, cutlass::layout::TensorNCxHWx<32>,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape0,
WarpShape0,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
64 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAddSaturate,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop0 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel0>;
using Conv2dFpropKernel1 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementA, cutlass::layout::TensorNCxHWx<32>,
ElementB, cutlass::layout::TensorCxRSKx<32>,
ElementC, cutlass::layout::TensorNCxHWx<32>,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape1,
WarpShape1,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
64 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAddSaturate,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop1 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel1>;
B2bInterleavedNonFusedConv2dRun<Conv2dFprop0, Conv2dFprop1, 32> nonFusedConv2d;
std::cout << "Running Non-fused back-to-back INT8 interleaved Optimized Convolution Fprops...\n";
bool pass = nonFusedConv2d.run(conv2d_s8_sm80_problem_size_0, conv2d_s8_sm80_problem_size_1, cutlass::conv::SplitKMode::kSerial,
alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
bool run_fused_conv2d_fprop_optimized_s8_sm80_rf_res() {
using ElementA = int8_t;
using ElementB = int8_t;
using ElementC = int8_t;
using ElementAccumulator = int32_t;
using ElementCompute = float;
ElementCompute alpha0 = ElementCompute(1);
//Fused kernel has built-in bias, setting beta=0
ElementCompute beta0 = ElementCompute(0);
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta=1 for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 64>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 64>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>;
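
  // Note (explanatory, stated informally): for the register-file-resident fusion below, the
  // first convolution's output channel count (64) matches both ThreadblockShape0::kN and
  // WarpShape0::kN, so each warp keeps conv0's full accumulator tile in registers and feeds it
  // to conv1 without a round trip through shared memory.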
using EpilogueOutputOp0 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
8 * InstructionShape::kN / 32,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>;
using EpilogueOutputOp1 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
64 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>;
using B2bConv2dFpropKernel = typename cutlass::conv::kernel::DefaultB2bConv2dFprop<
ElementA, cutlass::layout::TensorNCxHWx<32>,
ElementB, cutlass::layout::TensorCxRSKx<32>,
ElementC, cutlass::layout::TensorNCxHWx<32>,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAddSaturate,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using B2bConv2dFprop = cutlass::conv::device::B2bImplicitGemmConvolution<B2bConv2dFpropKernel>;
B2bInterleavedFusedConv2dRun<B2bConv2dFprop, 32> fusedConv2d;
std::cout << "Running Fused back-to-back INT8 interleaved Optimized Convolution Fprops with RF residency...\n";
bool pass = fusedConv2d.run(conv2d_s8_sm80_problem_size_0, conv2d_s8_sm80_problem_size_1, cutlass::conv::SplitKMode::kSerial,
alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
int main() {
std::vector<bool (*)()>funcs = {
&run_nonfused_conv2d_fprop_optimized_s8_sm80,
&run_fused_conv2d_fprop_optimized_s8_sm80_rf_res
};
return testRun(80, funcs, "conv int8 RF residency");
}
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/fused_two_convs_s8_sm80_rf.cu/0 | {
"file_path": "examples/13_two_tensor_op_fusion/fused_two_convs_s8_sm80_rf.cu",
"repo_id": "examples",
"token_count": 3356
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "kernel/default_b2b_conv2d_fprop.h"
#include "kernel/b2b_implicit_gemm_convolution.h"
#include "threadblock/b2b_implicit_gemm_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
// Use fragment iterator for A operand
using AccumulatorLayout = cutlass::layout::ColumnMajor;
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistage<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
ThreadblockShape1,
FragmentIteratorA1,
IteratorAccumulatorScaleBias,
FragmentIteratorA1ScaleBias,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
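/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added commentary, not part of the original header): one plausible way
// the specialization above might be instantiated. Every concrete choice below -- element types,
// tile shapes, epilogue functors, stage count -- is an assumption made for illustration only;
// see the host code under examples/13_two_tensor_op_fusion for the instantiations actually used.
//
//   using EpilogueOutputOp0 = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t, 8, float, float,
//       cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling>;
//   using EpilogueOutputOp1 = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t, 8, float, float>;
//
//   using B2bFpropKernel = typename cutlass::conv::kernel::DefaultB2bConv2dFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementA, LayoutA
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementB, LayoutB
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementC, LayoutC
//       float,                                            // ElementAccumulator
//       cutlass::arch::OpClassTensorOp,
//       cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64, 64, 32>,             // ThreadblockShape0
//       cutlass::gemm::GemmShape<64, 128, 32>,            // ThreadblockShape1
//       cutlass::gemm::GemmShape<32, 64, 32>,             // WarpShape0
//       cutlass::gemm::GemmShape<32, 128, 32>,            // WarpShape1
//       cutlass::gemm::GemmShape<16, 8, 16>,              // InstructionShape
//       EpilogueOutputOp0,
//       EpilogueOutputOp1,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                // Stages
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::conv::IteratorAlgorithm::kAnalytic
//     >::Kernel;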
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int InterleavedK
>
struct DefaultB2bConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
  // Note: the GEMM shared memory threadmap is used here because the conv fprop global
  // memory layout is similar to the crosswise layout used by the interleaved GEMM
  // shared memory threadmap, whereas the interleaved GEMM global memory layout is
  // similar to the congruous layout.
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
  // Note: the GEMM shared memory threadmap is used here because the conv fprop global
  // memory layout is similar to the crosswise layout used by the interleaved GEMM
  // shared memory threadmap, whereas the interleaved GEMM global memory layout is
  // similar to the congruous layout.
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
// Use fragment iterator for A operand
using AccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistage<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
ThreadblockShape1,
FragmentIteratorA1,
IteratorAccumulatorScaleBias,
FragmentIteratorA1ScaleBias,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag
>
struct DefaultB2bConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA0 = typename MmaCore0::IteratorThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, LayoutA,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB0 = typename MmaCore0::IteratorThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, LayoutB,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
// Use fragment iterator for A operand
using AccumulatorLayout = cutlass::layout::ColumnMajor;
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 2;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
// Define iterators over tiles from the B operand
using ThreadMapB1 = typename MmaCore1::IteratorThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, LayoutB,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistage<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
ThreadblockShape1,
FragmentIteratorA1,
IteratorAccumulatorScaleBias,
FragmentIteratorA1ScaleBias,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
int InterleavedK
>
struct DefaultB2bConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized
> {
// Define the core components from GEMM
using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape0, WarpShape0, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
using MmaCore1 = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape1, WarpShape1, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
  // Note: the GEMM shared memory threadmap is used here because the conv fprop global
  // memory layout is similar to the crosswise layout used by the interleaved GEMM
  // shared memory threadmap, whereas the interleaved GEMM global memory layout is
  // similar to the congruous layout.
using ThreadMapA0 = typename MmaCore0::SmemThreadMapA;
using IteratorA0 =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA0
>;
using SmemIteratorA0 = typename MmaCore0::SmemIteratorA;
// Define iterators over tiles from the B operand
  // Note: the GEMM shared memory threadmap is used here because the conv fprop global
  // memory layout is similar to the crosswise layout used by the interleaved GEMM
  // shared memory threadmap, whereas the interleaved GEMM global memory layout is
  // similar to the congruous layout.
using ThreadMapB0 = typename MmaCore0::SmemThreadMapB;
using IteratorB0 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape0::kK, ThreadblockShape0::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB0
>;
using SmemIteratorB0 = typename MmaCore0::SmemIteratorB;
// Use fragment iterator for A operand
using AccumulatorLayout = cutlass::layout::RowMajor;
using FragmentIteratorA1 =
cutlass::gemm::warp::MmaTensorOpFragmentIterator<
cutlass::MatrixShape<MmaCore1::WarpShape::kM, MmaCore1::InstructionShape::kK>, //warp shape
cutlass::MatrixShape<MmaCore0::WarpShape::kM, MmaCore0::WarpShape::kN>, //accumulator shape
MmaCore1::Shape::kK, //kBlocksColumn
ElementAccumulator, ElementA, AccumulatorLayout, InstructionShape, EpilogueOutputOp0>;
/// Define iterators over tiles from scale/bias vectors
using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute;
using LayoutScaleBias = layout::RowMajor; //vector layout doesn't really matter
static int const kElementsPerAccess = 4;
using IteratorAccumulatorScaleBias =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
cutlass::MatrixShape<ThreadblockShape0::kM, ThreadblockShape0::kN>,
cutlass::MatrixShape<WarpShape1::kM, WarpShape1::kK>,
ElementScaleBias, LayoutScaleBias, kElementsPerAccess>
>;
// Warp-level iterators to load scale and bias vectors
using FragmentIteratorA1ScaleBias = cutlass::transform::warp::VectorFragmentIterator<
MatrixShape<1, IteratorAccumulatorScaleBias::Fragment::kElements>, ElementScaleBias,
LayoutScaleBias, InstructionShape, kElementsPerAccess>;
using ThreadMapB1 = typename MmaCore1::SmemThreadMapB;
using IteratorB1 =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape1::kK, ThreadblockShape1::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB1
>;
using SmemIteratorB1 = typename MmaCore1::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp1 = typename MmaCore1::MmaTensorOp;
using MmaPolicy0 = typename MmaCore0::MmaPolicy;
using MmaPolicy1 = typename MmaCore1::MmaPolicy;
// Define the Mma
using B2bMma = threadblock::B2bImplicitGemmMultistage<
ThreadblockShape0,
IteratorA0,
SmemIteratorA0,
arch::CacheOperation::Always,
IteratorB0,
SmemIteratorB0,
arch::CacheOperation::Global,
ThreadblockShape1,
FragmentIteratorA1,
IteratorAccumulatorScaleBias,
FragmentIteratorA1ScaleBias,
IteratorB1,
SmemIteratorB1,
arch::CacheOperation::Global,
EpilogueOutputOp0,
MmaPolicy0,
MmaPolicy1,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape1,
WarpMmaTensorOp1,
1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::B2bImplicitGemmConvolution<
B2bMma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_sm80.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop_sm80.h",
"repo_id": "examples",
"token_count": 9245
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped Back-to-back fused GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute a double-buffered, threadblock-scoped back-to-back (B2B) fused matrix product.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA0_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA0_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile
// (concept::MmaTensorOpFragmentIterator)
typename FragmentIteratorA1_,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: VectorIterator)
typename IteratorAccumulatorScaleBias_,
/// FragmentIterator to load Scale or Bias vector from threadblock fragment
typename FragmentIteratorA1ScaleBias_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...)
typename OutputOp_,
/// Policy describing tuning details (concept: MmaPipelinedPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPipelinedPolicy)
typename Policy1_,
/// Transformation applied to A0 operand
typename TransformA0_ = NumericArrayConverter<
typename SmemIteratorA0_::Element,
typename IteratorA0_::Element,
IteratorA0_::Fragment::kElements>,
///
/// Transformation applied to B0 operand
typename TransformB0_ = NumericArrayConverter<
typename SmemIteratorB0_::Element,
typename IteratorB0_::Element,
IteratorB0_::Fragment::kElements>,
///
/// Transformation applied to B1 operand
typename TransformB1_ = NumericArrayConverter<
typename SmemIteratorB1_::Element,
typename IteratorB1_::Element,
IteratorB1_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class B2bMmaPipelined :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, 2> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, 2>;
using Shape0 = Shape0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA0 = IteratorA0_; ///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA0;
using IteratorB0 = IteratorB0_; ///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB0;
using Policy0 = Policy0_; ///< Policy describing tuning details
using SmemIteratorA0 = SmemIteratorA0_;
using SmemIteratorB0 = SmemIteratorB0_;
using Shape1 = Shape1_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using FragmentIteratorA1 = FragmentIteratorA1_; ///< Iterates over intermediate accumulator tile
using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< Iterates over tiles of the scale and bias vectors in global memory
using FragmentIteratorA1ScaleBias =
FragmentIteratorA1ScaleBias_; ///< WarpIterator to load Scale or Bias vector from the threadblock fragment
using IteratorB1 = IteratorB1_; ///< Iterates over tiles of B operand in global memory
using Policy1 = Policy1_; ///< Policy describing tuning details
using Policy = Policy1; ///< Export Policy1 as the threadblock-level Mma's policy
using Shape = Shape1;
using SmemIteratorB1 = SmemIteratorB1_;
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using OutputOp = OutputOp_; ///< Epilogue after 1st Gemm
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
using TransformA0 = TransformA0_;
using TransformB0 = TransformB0_;
using TransformB1 = TransformB1_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA0 = typename IteratorA0::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB0 = typename IteratorB0::Fragment;
/// Fragment of accumulator tile
using FragmentC0 = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
/// Fragment of Scale and Bias loaded from global memory
using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB1 = typename IteratorB1::Fragment;
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy0::Operator::ArchTag;
/// Complex transform on A0 operand
static ComplexTransform const kTransformA0 = Operator0::kTransformA;
/// Complex transform on B0 operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
/// Complex transform on B1 operand
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Complex transform exports needed by higher-level kernels
static ComplexTransform const kTransformA = kTransformA0;
static ComplexTransform const kTransformB = kTransformB0;
  /// Statically assert kStages for MmaPipelined is two (Double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
private:
using WarpFragmentA0 = typename Operator0::FragmentA;
using WarpFragmentB0 = typename Operator0::FragmentB;
  /// Warp Fragment of operand A1 loaded from accumulator tile
using WarpFragmentA1 = typename FragmentIteratorA1::Fragment;
/// Warp Fragment of operand A1 scale and bias loaded from threadblock fragment
using WarpFragmentA1ScaleBias =
typename FragmentIteratorA1ScaleBias::Fragment;
using WarpFragmentB1 = typename Operator1::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA0 smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B0 operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
/// Iterator to write threadblock-scoped tile of B1 operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaPipelined(
typename Base::B2bMmaSharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx, ///< ID of each thread within a warp
int problem_size_0_n ///< GEMM0 N is used for accumulator extent
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.shared_storage0.operand_A_ref(), thread_idx),
smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx),
smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
//These should stay the same across different GEMM layers
int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;
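    // Worked example (added comment): with WarpCount0 = {2, 2} and warp_idx = 5,
    // warp_idx_mn = 1, warp_idx_k = 1, warp_idx_m = 1, and warp_idx_n = 0.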
//These may change across different GEMM layers
int tile_offset_k_0 = Base::kWarpGemmIterations0 * warp_idx_k;
int tile_offset_k_1 = Base::kWarpGemmIterations1 * warp_idx_k;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m, tile_offset_k_0});
this->warp_tile_iterator_B0_.add_tile_offset({tile_offset_k_0, warp_idx_n});
this->warp_tile_iterator_B1_.add_tile_offset({tile_offset_k_1, warp_idx_n});
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations_0, ///< number of iterations of the mainloop
FragmentC1 &accum, ///< destination accumulator tile
IteratorA0 iterator_A, ///< iterator over A operand in global memory
IteratorB0 iterator_B0, ///< iterator over B0 operand in global memory
IteratorAccumulatorScaleBias iterator_A1_scale, ///< iterator over A1 operand scale vectors in global memory
IteratorAccumulatorScaleBias iterator_A1_bias, ///< iterator over A1 operand bias vectors in global memory
IteratorB1 iterator_B1, ///< iterator over B1 operand in global memory
    FragmentC0 const &src_accum, ///< source accumulator tile
OutputOp output_op_0, ///< epilogue operation after 1st Gemm
TransformA0 transform_A0 = TransformA0(), ///< transformation applied to A0 fragment
TransformB0 transform_B0 = TransformB0(), ///< transformation applied to B0 fragment
TransformB1 transform_B1 = TransformB1()) { ///< transformation applied to B1 fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
FragmentC0 accum0 = src_accum;
FragmentA0 tb_frag_A;
FragmentB0 tb_frag_B0;
tb_frag_A.clear();
tb_frag_B0.clear();
// The last kblock is loaded in the prolog
iterator_A.load(tb_frag_A);
iterator_B0.load(tb_frag_B0);
++iterator_A;
++iterator_B0;
this->smem_iterator_A_.store(transform_A0(tb_frag_A));
this->smem_iterator_B0_.store(transform_B0(tb_frag_B0));
++this->smem_iterator_A_;
++this->smem_iterator_B0_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA0 warp_frag_A0[2];
WarpFragmentB0 warp_frag_B0[2];
this->warp_tile_iterator_A0_.set_kgroup_index(0);
this->warp_tile_iterator_B0_.set_kgroup_index(0);
this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);
this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
Operator0 warp_mma0;
int smem_write_stage_idx = 1;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations_0 <= 1);
iterator_B0.clear_mask(gemm_k_iterations_0 <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
// shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
    // Note: The main loop does not support Base::kWarpGemmIterations0 == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset if this is
        // the last group.
if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A0(tb_frag_A));
this->smem_iterator_B0_.store(transform_B0(tb_frag_B0));
__syncthreads();
++this->smem_iterator_A_;
++this->smem_iterator_B0_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A0_.add_tile_offset(
{0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0});
this->warp_tile_iterator_B0_.add_tile_offset(
{-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
if (warp_mma_k == 0) {
iterator_A.load(tb_frag_A);
iterator_B0.load(tb_frag_B0);
++iterator_A;
++iterator_B0;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations_0 <= 2);
iterator_B0.clear_mask(gemm_k_iterations_0 <= 2);
}
warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2],
warp_frag_B0[warp_mma_k % 2], accum0);
}
}
//2nd Gemm
/// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile
FragmentIteratorA1 warp_tile_iterator_A1_(accum0);
//
// Prologue
//
FragmentA1ScaleBias tb_frag_A1_scale;
FragmentA1ScaleBias tb_frag_A1_bias;
FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale);
FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias);
FragmentB1 tb_frag_B1;
if(PerChannelScale)
tb_frag_A1_scale.clear();
tb_frag_A1_bias.clear();
tb_frag_B1.clear();
// The last kblock is loaded in the prolog
if(PerChannelScale)
iterator_A1_scale.load(tb_frag_A1_scale);
iterator_A1_bias.load(tb_frag_A1_bias);
iterator_B1.load(tb_frag_B1);
if(PerChannelScale)
++iterator_A1_scale;
++iterator_A1_bias;
++iterator_B1;
this->smem_iterator_B1_.store(transform_B1(tb_frag_B1));
++this->smem_iterator_B1_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA1ScaleBias warp_frag_A1_scale[2];
WarpFragmentA1ScaleBias warp_frag_A1_bias[2];
WarpFragmentA1 warp_frag_A1[2];
WarpFragmentB1 warp_frag_B1[2];
this->warp_tile_iterator_B1_.set_kgroup_index(0);
if(PerChannelScale)
warp_tile_iterator_A1_scale_.load(warp_frag_A1_scale[0]);
warp_tile_iterator_A1_bias_.load(warp_frag_A1_bias[0]);
warp_tile_iterator_A1_.load(warp_frag_A1[0], warp_frag_A1_scale[0],
warp_frag_A1_bias[0], output_op_0);
this->warp_tile_iterator_B1_.load(warp_frag_B1[0]);
++warp_tile_iterator_A1_;
if(PerChannelScale)
++warp_tile_iterator_A1_scale_;
++warp_tile_iterator_A1_bias_;
++this->warp_tile_iterator_B1_;
Operator1 warp_mma1;
smem_write_stage_idx = 1;
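    // The second GEMM's K extent is the first GEMM's N extent (a compile-time threadblock
    // tile dimension), so gemm_k_iterations_1 is a compile-time constant and the outer loop
    // below can be fully unrolled.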
int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1;
// Avoid reading out of bounds
iterator_B1.clear_mask(gemm_k_iterations_1 <= 1);
//
// Mainloop
//
    // Note: The main loop does not support Base::kWarpGemmIterations1 == 2.
CUTLASS_PRAGMA_UNROLL
for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset if this is
        // the last group.
if (warp_mma_k == Base::kWarpGemmIterations1 - 1) {
// Write fragments to shared memory
this->smem_iterator_B1_.store(transform_B1(tb_frag_B1));
__syncthreads();
++this->smem_iterator_B1_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_B1_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations1,
0});
}
smem_write_stage_idx ^= 1;
if(PerChannelScale) {
tb_frag_A1_scale.clear();
iterator_A1_scale.load(tb_frag_A1_scale);
++iterator_A1_scale;
}
tb_frag_A1_bias.clear();
iterator_A1_bias.load(tb_frag_A1_bias);
++iterator_A1_bias;
}
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
if(PerChannelScale)
warp_tile_iterator_A1_scale_.load(warp_frag_A1_scale[(warp_mma_k + 1) % 2]);
warp_tile_iterator_A1_bias_.load(warp_frag_A1_bias[(warp_mma_k + 1) % 2]);
warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2],
warp_frag_A1_scale[(warp_mma_k + 1) % 2],
warp_frag_A1_bias[(warp_mma_k + 1) % 2],
output_op_0);
this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]);
if(PerChannelScale)
++warp_tile_iterator_A1_scale_;
++warp_tile_iterator_A1_bias_;
++warp_tile_iterator_A1_;
++this->warp_tile_iterator_B1_;
if (warp_mma_k == 0) {
iterator_B1.load(tb_frag_B1);
++iterator_B1;
// Avoid reading out of bounds if this was the last loop iteration
iterator_B1.clear_mask(gemm_k_iterations_1 <= 2);
}
warp_mma1(accum, warp_frag_A1[warp_mma_k % 2],
warp_frag_B1[warp_mma_k % 2], accum);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined.h",
"repo_id": "examples",
"token_count": 8674
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  This example shows how to fuse the activation's per-channel scale+bias+relu
  into the wgrad mainloop.
  Compared with the original wgrad kernel, this example has two additional vectors, one
  for the scale and one for the bias. The length of each vector equals the number of
  activation channels. The kernel loads the vectors when the associated activation
  channels are loaded in the mainloop. Between reading the activations and scale/bias
  data from shared memory and issuing the tensor core instructions, scale+bias+relu is
  computed in the register file.
  This example is customized for the Ampere 16816 FP16 tensor core instruction. Changing
  to different data types or a different tensor core instruction requires source code
  changes. See
  include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h for more
  technical details.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes the data types for the input and output tensors and the
// computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t; // Data type of elements in input scale and bias vectors
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 5;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
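    // For ElementOutput = float above, this is 128 / 32 = 4 elements per access.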
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dWgradFusionKernel = typename cutlass::conv::kernel::DefaultConv2dWgradFusion<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv2dWgradFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
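    // (128 bits per access) / (16 bits per cutlass::half_t element) = 8 elements.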
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size,
cutlass::MatrixCoord stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "26_ampere_wgrad_mainloop_fusion example\n\n"
<< " This example fuses scale+bias+relu of the activation into Ampere's\n"
<< " Tensor Core operators on F16 data types to compute\n"
<< " backward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
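    // Worked example (added comment): for the default 1x32x32x32 input and 32x3x3x32 filter
    // (stride 1, padding 1), NPQK = 1*32*32*32 = 32768 and CRS = 32*3*3 = 288,
    // so fmas = 9,437,184.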
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride.row() << ","
<< options.conv_stride.column() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_b(options.input_size);
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_b_scale({1, options.input_size.c()});
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_b_bias({1, options.input_size.c()});
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size);
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill scale vector for tensor B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_scale.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill bias vector for tensor B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_bias.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_b_scale.sync_device();
tensor_b_bias.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
typename ImplicitGemmFusion::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_b_scale.device_ref(),
tensor_b_bias.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemmFusion implicit_gemm_fusion_op;
size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_fusion_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute scale + bias + relu in host code
for (int n = 0; n < options.input_size.n(); ++n) {
for (int h = 0; h < options.input_size.h(); ++h) {
for (int w = 0; w < options.input_size.w(); ++w) {
for (int c = 0; c < options.input_size.c(); ++c) {
tensor_transformed_b.at({n, h, w, c}) = std::max(
ElementOutput(0), ElementOutput(tensor_b.at({n, h, w, c}) *
tensor_b_scale.at({0, c}) +
tensor_b_bias.at({0, c})));
}
}
}
}
tensor_transformed_b.sync_device();
// Compute with reference implementation
cutlass::reference::device::Conv2dWgrad<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_transformed_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "26_ampere_wgrad_mainloop_fusion_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
  // CUTLASS must be compiled with the CUDA 11.0 Toolkit or later to run this example.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major == 8 && props.minor == 0)) {
std::cerr << "This test must run on SM80 A100.\n";
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 408};
struct Benchmark {
int h, w, c, k, r, s, stride_h, stride_w;
} layers[] = {
{56, 56, 64, 256, 1, 1, 1, 1},
{56, 56, 64, 64, 1, 1, 1, 1},
{56, 56, 64, 64, 3, 3, 1, 1},
{56, 56, 256, 64, 1, 1, 1, 1},
{56, 56, 256, 512, 1, 1, 2, 2},
{56, 56, 256, 128, 1, 1, 1, 1},
{56, 56, 128, 128, 3, 3, 2, 2},
{28, 28, 128, 512, 1, 1, 1, 1},
{28, 28, 512, 128, 1, 1, 1, 1},
{28, 28, 128, 128, 3, 3, 1, 1},
{28, 28, 512, 1024, 1, 1, 2, 2},
{28, 28, 512, 256, 1, 1, 1, 1},
{28, 28, 256, 256, 3, 3, 2, 2},
{14, 14, 256, 1024, 1, 1, 1, 1},
{14, 14, 1024, 256, 1, 1, 1, 1},
{14, 14, 256, 256, 3, 3, 1, 1},
{14, 14, 1024, 2048, 1, 1, 2, 2},
{14, 14, 1024, 512, 1, 1, 1, 1},
{14, 14, 512, 512, 3, 3, 2, 2},
{ 7, 7, 512, 2048, 1, 1, 1, 1},
{ 7, 7, 2048, 512, 1, 1, 1, 1},
{ 7, 7, 512, 512, 3, 3, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c},
{layer.k, layer.r, layer.s, layer.c},
{layer.stride_h, layer.stride_w});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu/0 | {
"file_path": "examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu",
"repo_id": "examples",
"token_count": 9921
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Contains additional metadata about layout permute functions used in the example.
*/
#include "cutlass/tensor_coord.h"
#include "cutlass/layout/permute.h"
/// Additional permutation metadata to facilitate testing/printing
template<typename PermuteLayout>
struct PermuteInfo;
/// Specialization for default case (no permute). Other specializations must follow this template.
template<>
struct PermuteInfo<cutlass::layout::NoPermute> {
/// Whether this is a BMM or GEMM permutation (NoPermute can actually be either)
static bool constexpr kBatched = false;
  /// Minimum divisor for row extent
static int constexpr kRowFactor = 1;
/// Minimum divisor for column extent
static int constexpr kColumnFactor = 1;
/// Minimum divisor for batch size dimension
static int constexpr kBatchFactor = 1;
/// Tensor layout used in permutation operation
using Layout = cutlass::layout::PackedVectorLayout;
static std::string name() {
return "NoPermute";
}
/// User-friendly description of the permute operation
static std::string desc() {
return "no permutation";
}
/// Infer original higher-rank tensor shape from GEMM/BMM matrix extents.
/// For direct (output) permutations, must be a simple reshape of extent.
/// For inverse (input) permutations, must return shape *before* permute operation.
/// In case of NoPermute, simply use a linear (rank 1) view of the memory
static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
return Layout::TensorCoord(extent.row() * extent.column() * batch_count);
}
/// Compute the permuted higher-rank tensor shape from the original shape.
static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
return s;
}
};
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>> {
static bool constexpr kBatched = true;
static int constexpr kRowFactor = 1;
static int constexpr kColumnFactor = 1;
static int constexpr kBatchFactor = D1;
using Layout = cutlass::layout::TensorNHWC;
static std::string name() {
return "Tensor4DPermuteBMM0213<" + std::to_string(D1) + ">";
}
static std::string desc() {
return "batched GEMM permutation [0, 2, 1, 3]";
}
static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int D0 = batch_count / D1;
int D2 = extent.row();
int D3 = extent.column();
return {D0, D1, D2, D3};
}
static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
return {s[0], s[2], s[1], s[3]};
}
};
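// Illustrative note (added; not part of the original header): for Tensor4DPermuteBMM0213RowMajor<2>
// with a GEMM extent of 16x8 and batch_count = 6, original_shape() returns {3, 2, 16, 8} and
// permute() reorders it to {3, 16, 2, 8}, matching the [0, 2, 1, 3] permutation described above.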
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D1>>
: public PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>> {
static bool constexpr kBatched = true;
static int constexpr kRowFactor = 1;
static int constexpr kColumnFactor = D1;
static int constexpr kBatchFactor = 1;
using Base = PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>>;
using Layout = typename Base::Layout;
static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int D0 = batch_count;
int D2 = extent.row();
int D3 = extent.column() / D1;
return {D0, D1, D2, D3};
}
};
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>> {
static bool constexpr kBatched = true;
static int constexpr kRowFactor = 1;
static int constexpr kColumnFactor = 1;
static int constexpr kBatchFactor = D1;
using Layout = cutlass::layout::TensorNHCW;
static std::string name() {
return "Tensor4DPermuteBMM0321<" + std::to_string(D1) + ">";
}
static std::string desc() {
return "batched GEMM permutation [0, 3, 2, 1]";
}
static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int D0 = batch_count / D1;
int D2 = extent.row();
int D3 = extent.column();
return {D0, D1, D2, D3};
}
static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
return {s[0], s[3], s[2], s[1]};
}
};
template<int D1>
struct PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D1>>
: public PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>> {
static bool constexpr kBatched = true;
static int constexpr kRowFactor = D1;
static int constexpr kColumnFactor = 1;
static int constexpr kBatchFactor = 1;
using Base = PermuteInfo<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>>;
using Layout = typename Base::Layout;
static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int D0 = batch_count;
int D2 = extent.row() / D1;
int D3 = extent.column();
return {D0, D1, D2, D3};
}
};
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> {
static bool constexpr kBatched = false;
static int constexpr kRowFactor = D1;
static int constexpr kColumnFactor = D2;
static int constexpr kBatchFactor = 1;
using Layout = cutlass::layout::TensorNHWC;
static std::string name() {
return "Tensor4DPermute0213<" + std::to_string(D1) + "," + std::to_string(D2) + ">";
}
static std::string desc() {
return "normal GEMM permutation [0, 2, 1, 3]";
}
static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int D0 = extent.row() / D1;
int D3 = extent.column() / D2;
return {D0, D1, D2, D3};
}
static Layout::TensorCoord permute(Layout::TensorCoord const &s) {
return {s[0], s[2], s[1], s[3]};
}
};
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>>
: public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> {
static bool constexpr kBatched = false;
static int constexpr kRowFactor = D2;
static int constexpr kColumnFactor = D1;
static int constexpr kBatchFactor = 1;
using Base = PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>>;
using Layout = typename Base::Layout;
static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int D0 = extent.row() / D2;
int D3 = extent.column() / D1;
return {D0, D1, D2, D3};
}
};
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>>
: public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>> {
using Layout = cutlass::layout::TensorCWHN;
};
template<int D1, int D2>
struct PermuteInfo<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>>
: public PermuteInfo<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>> {
using Layout = cutlass::layout::TensorCWHN;
};
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>> {
static bool constexpr kBatched = false;
static int constexpr kRowFactor = T1;
static int constexpr kColumnFactor = T2 * T3;
static int constexpr kBatchFactor = 1;
using Layout = cutlass::layout::TensorNDHWC;
static std::string name() {
return "Tensor5DPermute20314<" + std::to_string(T1) + "," + std::to_string(T2) + "," + std::to_string(T3) + ">";
}
static std::string desc() {
return "normal GEMM permutation [2, 0, 3, 1, 4]";
}
static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count)
{
int const T0 = extent.row() / T1;
int const T4 = extent.column() / (T2 * T3);
return {T0, T1, T2, T3, T4};
}
static Layout::TensorCoord permute(Layout::TensorCoord const &s)
{
return {s[2], s[0], s[3], s[1], s[4]};
}
};
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajorInverse<T1, T2, T3>>
: public PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>> {
static bool constexpr kBatched = false;
static int constexpr kRowFactor = T2;
static int constexpr kColumnFactor = T1 * T3;
static int constexpr kBatchFactor = 1;
using Base = PermuteInfo<cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3>>;
using Layout = typename Base::Layout;
static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int const T0 = extent.row() / T2;
int const T4 = extent.column() / (T1 * T3);
return {T0, T1, T2, T3, T4};
}
};
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>> {
static bool constexpr kBatched = false;
static int constexpr kRowFactor = T1;
static int constexpr kColumnFactor = T2 * T3;
static int constexpr kBatchFactor = 1;
using Layout = cutlass::layout::TensorCWHDN;
static std::string name() {
return "Tensor5DPermute02413<" + std::to_string(T1) + "," + std::to_string(T2) + "," + std::to_string(T3) + ">";
}
static std::string desc() {
return "normal GEMM permutation [0, 2, 4, 1, 3]";
}
using Coord = cutlass::Tensor5DCoord;
static Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count)
{
int const T0 = extent.row() / T1;
int const T4 = extent.column() / (T2 * T3);
return {T0, T1, T2, T3, T4};
}
static Layout::TensorCoord permute(Layout::TensorCoord const &s)
{
return {s[0], s[2], s[4], s[1], s[3]};
}
};
template<int T1, int T2, int T3>
struct PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<T1, T2, T3>>
: public PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>> {
static bool constexpr kBatched = false;
static int constexpr kRowFactor = T2;
static int constexpr kColumnFactor = T1 * T3;
static int constexpr kBatchFactor = 1;
using Base = PermuteInfo<cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>>;
using Layout = typename Base::Layout;
static typename Layout::TensorCoord original_shape(cutlass::MatrixCoord extent, int batch_count) {
int const T0 = extent.row() / T2;
int const T4 = extent.column() / (T1 * T3);
return {T0, T1, T2, T3, T4};
}
};
| examples/39_gemm_permute/permute_info.h/0 | {
"file_path": "examples/39_gemm_permute/permute_info.h",
"repo_id": "examples",
"token_count": 4266
} | 10 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import argparse
import torch
import sys
import os
from piped_subprocess import PipedSubprocess, TORCH_DTYPE_NAME
import math
parser = argparse.ArgumentParser()
parser.add_argument("example_exe", type=str, help="Path to the 41_fused_multi_head_attention_backward executable")
args = parser.parse_args()
torch.manual_seed(0)
dtype = torch.float16
B, Mq, Mkv, H, K, Kv = 2, 1024, 1024, 5, 128, 128
causal = True
repeat_count = 100
ATOL = {
torch.float: 5e-4,
torch.half: 9.5e-2,
torch.bfloat16: 7e-1,
}[dtype]
RTOL = {
torch.float: 1e-4,
torch.half: 2e-2,
torch.bfloat16: 1e-1,
}[dtype]
assert not (causal and Mq < Mkv), "causal only supports seqlenK <= seqlenQ"
fmha_bw_binary = args.example_exe
if not os.path.isfile(fmha_bw_binary):
print(f"""No such file: `{fmha_bw_binary}`\nDid you forget to run "make 41_fused_multi_head_attention"?""")
sys.exit(1)
def create_lower_triangular_mask():
return torch.triu(torch.full( # type: ignore
[1, Mq, Mkv],
dtype=dtype,
fill_value=float("-inf"),
), diagonal=1)
def ref_mha_bmk(q, k, v, mask):
# Multi-head attention with inputs/outputs in BMK format
q = q.float()
k = k.float()
v = v.float()
q = q * (1 / q.shape[-1] ** 0.5)
attn = q @ k.transpose(-2, -1)
if mask is not None:
attn += mask
attn_max = attn.max(-1, True).values
attn_norm = (attn - attn_max).exp().sum(-1, True)
attn = attn.softmax(-1)
lse = attn_max + attn_norm.log()
lse = lse.squeeze(2)
return attn @ v, lse
def bmhk2bmk(t):
return t.permute((0, 2, 1, 3)).reshape(
[t.shape[0] * t.shape[2], t.shape[1], t.shape[3]]
)
def ref_mha_bmhk(q, k, v, mask):
# Multi-head attention with inputs/outputs in BMHK format
assert q.ndim == 4
out, lse = ref_mha_bmk(bmhk2bmk(q), bmhk2bmk(k), bmhk2bmk(v), mask=mask)
out = out.reshape([q.shape[0], q.shape[2], q.shape[1], v.shape[3]])
return out.permute((0, 2, 1, 3)), lse.reshape([q.shape[0], q.shape[2], q.shape[1]])
def ref_mha_bw_bmhk(q, k, v, mask, lse, out, grad_out, delta):
lse = lse[:, :, :q.shape[1]] #BMH, unpad Q dimension
delta = delta.reshape([-1, delta.shape[-1], 1])
# bmhk -> bmk
q, k, v, out, grad_out = [bmhk2bmk(x).float() for x in (q, k, v, out, grad_out)]
attn_T = k @ q.transpose(-2, -1)
if mask is not None:
attn_T += mask.transpose(-2, -1)
attn_T = attn_T * (1 / q.shape[-1] ** 0.5)
attn_T = attn_T - lse.reshape([-1, 1, lse.shape[-1]])
attn_T = attn_T.exp()
grad_v = attn_T @ grad_out
dov = grad_out @ v.transpose(-2, -1)
tmp = (dov - delta) * attn_T.transpose(-2, -1)
tmp = tmp / (q.shape[-1] ** 0.5)
grad_q = tmp @ k
grad_k = tmp.transpose(-2, -1) @ q
return [x.reshape([B, H, x.shape[1], x.shape[-1]]).permute([0, 2, 1, 3]) for x in [grad_q, grad_k, grad_v]]
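# Note (added for clarity; not part of the original script): the reference backward pass above
# follows the standard memory-efficient attention formulation: the softmax matrix is recomputed
# from the stored logsumexp (lse), delta = rowsum(grad_out * out), and the gradient of the
# attention scores is P * (grad_out @ V^T - delta), rescaled by 1/sqrt(head_dim) before computing
# grad_q and grad_k.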
print("initializing tensors...")
query = torch.randn([B, Mq, H, K], dtype=dtype)
key = 3 * torch.randn([B, Mkv, H, K], dtype=dtype)
value = 3 * torch.randn([B, Mkv, H, Kv], dtype=dtype)
mask = create_lower_triangular_mask() if causal else None
# let PyTorch compute gradients
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
print("computing fw...")
out, lse = ref_mha_bmhk(query, key, value, mask=mask)
out = out.to(dtype).contiguous()
grad_out = 3 * torch.randn([B, Mq, H, Kv], dtype=dtype)
print("computing bw with autograd...")
out.backward(grad_out)
scale = (1 / query.shape[-1] ** 0.5)
# Additional data needed by the kernel
delta = (grad_out.float() * out.float()).sum(-1).transpose(-2, -1).contiguous()
pad_amount = (32 - (lse.shape[2] % 32)) % 32
lse = torch.nn.functional.pad(lse, [0, pad_amount], value=math.inf)
print("computing bw with reference implem...")
gQr, gKr, gVr = ref_mha_bw_bmhk(query, key, value, mask, lse, out, grad_out, delta)
with PipedSubprocess(fmha_bw_binary) as bw_kernel:
# Send kernel arguments
bw_kernel.write(
TORCH_DTYPE_NAME[query.dtype],
"scale", scale,
"head_dim", K,
"head_dim_value", Kv,
"num_queries", Mq,
"num_keys", Mkv,
"num_heads", H,
"custom_mask_type", (1 if causal else 0),
"num_batches", B,
"repeat_count", repeat_count,
"num_splits_key", (Mkv // 128),
)
bw_kernel.writeTensor(query, "query", ["q_strideB", "q_strideM", "q_strideH"])
bw_kernel.writeTensor(key, "key", ["k_strideB", "k_strideM", "k_strideH"])
bw_kernel.writeTensor(value, "value", ["v_strideB", "v_strideM", "v_strideH"])
bw_kernel.writeTensor(lse, "logsumexp", ["lse_strideB", "lse_strideH"])
bw_kernel.writeTensor(out, "output", ["o_strideB", "o_strideM", "o_strideH"])
bw_kernel.writeTensor(grad_out, "grad_output", ["gO_strideB", "gO_strideM", "gO_strideH"])
bw_kernel.writeTensor(delta, "delta", ["delta_strideB", "delta_strideH"])
if bw_kernel.read() != "OK":
print("Got unexpected output")
print(bw_kernel.subp.communicate()[0])
        sys.exit(1)
# Read kernel output
gQ = bw_kernel.readTensor("grad_query", ["gQ_strideB", "gQ_strideM", "gQ_strideH"], query.shape).float()
gK = bw_kernel.readTensor("grad_key", ["gK_strideB", "gK_strideM", "gK_strideH"], key.shape).float()
gV = bw_kernel.readTensor("grad_value", ["gV_strideB", "gV_strideM", "gV_strideH"], value.shape).float()
runtime_ms = float(bw_kernel.readNamed("runtime_ms"))
float_ops = B * H * sum([
# att = Q @ K.transpose
Mq * Mkv * K * 2,
# att @ dO
Mkv * Mq * Kv * 2,
# dov = dO @ V
Mq * Kv * Mkv * 2,
# dov @ K
Mq * K * Mkv * 2,
# dov @ Q
Mq * K * Mkv * 2,
])
if causal:
float_ops //= 2
print(f"""
Fused multi-head attention - backward
batch_size={B}
num_queries={Mq}
num_keys={Mkv}
num_heads={H}
head_dim={K}
head_dim_value={Kv}
Correctness:
grad_query: {"PASS" if torch.allclose(gQ, gQr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gQ - gQr).abs().max()})
grad_key: {"PASS" if torch.allclose(gK, gKr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gK - gKr).abs().max()})
grad_value: {"PASS" if torch.allclose(gV, gVr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gV - gVr).abs().max()})
(atol={ATOL} / rtol={RTOL})
Runtime: {runtime_ms}ms ({(float_ops / (1024 ** 4)) / (runtime_ms / 1000):.4f} TFlops)
""")
assert torch.allclose(query.grad.float(), gQr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
assert torch.allclose(key.grad.float(), gKr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
assert torch.allclose(value.grad.float(), gVr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
| examples/41_fused_multi_head_attention/fmha_backward_test.py/0 | {
"file_path": "examples/41_fused_multi_head_attention/fmha_backward_test.py",
"repo_id": "examples",
"token_count": 3659
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape_,
typename WarpShape_,
int PartitionsK,
typename Element_,
int ElementsPerAccess
>
struct DefaultThreadMapTensorOpForFusedBias {
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using Element = Element_;
static int const kElementsPerAccess = ElementsPerAccess;
//
// Definitions
//
struct Detail {
/// Tensor Operations fundamentally perform operations on 8 rows
static int const kTensorOpRows = 8;
static int const kWarpSize = 32;
    static_assert(
      !(ThreadblockShape::kM % WarpShape::kM) &&
      !(ThreadblockShape::kN % WarpShape::kN), "Divisibility");
/// Number of warps
using WarpCount = gemm::GemmShape<
ThreadblockShape::kM / WarpShape::kM,
ThreadblockShape::kN / WarpShape::kN,
kPartitionsK
>;
/// Number of participating threads
static int const kThreads = WarpCount::kCount * kWarpSize;
};
//
// ThreadMap
//
/// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
using Type = OutputTileOptimalThreadMapBiasAct <
OutputTileShape<ThreadblockShape::kN, Detail::kTensorOpRows, Detail::WarpCount::kM, 1, 1>,
OutputTileShape<1, WarpShape::kM / Detail::kTensorOpRows, 1, 1, WarpShape::kM / Detail::kTensorOpRows>,
Detail::kThreads,
kElementsPerAccess,
sizeof_bits<Element>::value
>;
};
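// Illustrative instantiation (hypothetical shapes; added for clarity and not part of the original
// header):
//
//   using ThreadMap = typename DefaultThreadMapTensorOpForFusedBias<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // ThreadblockShape
//       cutlass::gemm::GemmShape<64, 64, 32>,     // WarpShape
//       1,                                        // PartitionsK
//       cutlass::half_t,                          // Element
//       8                                         // ElementsPerAccess (128b / 16b)
//   >::Type;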
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_thread_map_tensor_op_for_fused_bias.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/default_thread_map_tensor_op_for_fused_bias.h",
"repo_id": "examples",
"token_count": 1126
} | 12 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
def type_2_cutlass_type(input_type = "fp16"):
# float point type
if input_type == "fp32":
return "float"
if input_type == "bf16":
return "cutlass::bfloat16_t"
if input_type == "fp16":
return "cutlass::half_t"
# integer type
if(input_type == "int32"):
return "int32_t"
if(input_type == "int8"):
return "int8_t"
if input_type == 'Row':
return 'cutlass::layout::RowMajor'
if input_type == 'Col':
return 'cutlass::layout::ColumnMajor'
def cvt_2_cutlass_shape(gemm_shape):
# gemm shape
if len(gemm_shape) == 3:
val = "cutlass::gemm::GemmShape<" \
+ str(gemm_shape[0]) + ", " \
+ str(gemm_shape[1]) + ", " \
+ str(gemm_shape[2]) + ">"
return val
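# Illustrative examples (added; not part of the original helper):
#   type_2_cutlass_type("fp16")          -> "cutlass::half_t"
#   cvt_2_cutlass_shape([128, 128, 32])  -> "cutlass::gemm::GemmShape<128, 128, 32>"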
def write_2_headfile(filename, file_dir, string):
with open(file_dir + filename, 'w') as f:
f.write("/* Auto Generated code - Do not edit.*/\n\n\n#pragma once\n" + string)
def var_idx(variable, index):
    return variable + str(index)
def list_2_string(input_list):
rtn_string = ""
cnt = 0
for element in input_list:
final = ", \n"
if cnt == len(input_list) - 1:
final = "\n"
cnt += 1
rtn_string += str(element) + final
return rtn_string
def get_epilogue_info(layer_info):
return layer_info['epilogue']
def get_epilogue_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['tp']
def get_epilogue_add_bias_or_not(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['addbias']
def get_epilogue_add_bias_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['bias_tp']
def get_epilogue_args(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['args']
def get_epilogue_bias_shape(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
if bias_tp == 'mat':
mn_shape[0] = 'M'
return mn_shape
elif bias_tp == 'vec':
mn_shape[0] = 1
return mn_shape
else:
assert(0)
def get_epilogue_bias_ldm(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
c_layout = layer_info['C_format'].lower()
if c_layout != 'row':
assert(0)
if bias_tp == 'mat':
return mn_shape[1]
elif bias_tp == 'vec':
return 0
else:
assert(0)
def get_epilogue_compute_tp(layer_info):
return layer_info['Acc_tp']
| examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py",
"repo_id": "examples",
"token_count": 1814
} | 13 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  This example shows how to run depthwise 2d convolution kernels using the functions and data
  structures provided by CUTLASS with SIMT instructions.
  There are 3 types of implementations of depthwise 2d convolution:
  1. kAnalytic
     Implicit GEMM 2d convolution algorithm.
  2. kOptimized
     An optimized algorithm that supports arbitrary stride and dilation.
  3. kFixedStrideDilation
     An optimized algorithm with stride and dilation fixed at compile time, which reduces runtime
     computation and enables further optimizations.
  In general, kFixedStrideDilation performs better than kOptimized. However, if the filter size,
  stride, or dilation is large, it may cause register spilling and hurt performance; in that case,
  use kOptimized instead.
  For kOptimized and kFixedStrideDilation, enable split-K when the output tensor is large in order
  to fully utilize GPU hardware resources and achieve better performance.
  This example demonstrates how to construct and run a kFixedStrideDilation depthwise 2d
  convolution kernel; see the note following this comment for how one might select kOptimized
  instead.
*/
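// Note (added for clarity; not part of the original example): this example selects
// IteratorAlgorithm::kFixedStrideDilation with a compile-time 1x1 stride and dilation (see the
// StrideShape and DilationShape definitions below). Selecting kOptimized would, in principle, only
// require changing the IteratorAlgorithm constant, e.g.
//
//   static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm =
//       cutlass::conv::IteratorAlgorithm::kOptimized;
//
// though the remaining kernel template arguments may need corresponding adjustments (not verified
// here).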
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_depthwise_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = cutlass::half_t; // Data type of accumulator
using ElementComputeEpilogue = cutlass::half_t; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = cutlass::half_t; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassSimt;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm60;
// This code section describes the groups a thread block will compute
constexpr int groups_per_cta = 64;
// This code section describes the output tile <N, O, P, Q> a thread block will compute
using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>;
// This code section describes the filter shape <R, S>
using FilterShape = cutlass::MatrixShape<3, 3>;
// Threadblock tile shape
using ThreadblockShape =
cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>;
// This code section describes the tile size a warp will compute
// WarpShape::kM = number of output pixels (P * Q) each warp processes
// WarpShape::kN = number of groups (groups_per_cta) each warp processes
// WarpShape::kK = filter size (R * S) each warp processes
using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>;
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
ThreadBlockOutputShape::kN,
ThreadBlockOutputShape::kH,
ThreadBlockOutputShape::kW>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section selects the kFixedStrideDilation iterator algorithm
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm =
cutlass::conv::IteratorAlgorithm::kFixedStrideDilation;
using StrideShape = cutlass::MatrixShape<1, 1>;
using DilationShape = cutlass::MatrixShape<1, 1>;
constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
kEpilogueElementsPerAccess, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha/beta in linear combination
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling>; // Epilogue scaling operation.
using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm,
cutlass::conv::StrideSupport::kFixed,
StrideShape,
DilationShape>::Kernel;
using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int groups;
int splitk;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
std::string tag;
Options()
: help(false),
input_size(1, 128, 128, 32),
filter_size(32, 3, 3, 1),
groups(32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
splitk(1) {}
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// depthwise conv
if (groups != input_size.c()) {
return false;
}
if (filter_size.n() != groups) {
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) {
return false;
}
// Filter size passed through command line does not match filter size template parameter
if (filter_size.h() != FilterShape::kRow || filter_size.w() != FilterShape::kColumn) {
std::cerr << "Filter size passed in (" << filter_size.h() << "x" << filter_size.w() << ") "
<< "must match the FilterShape template parameter of the convolution "
<< "(" << FilterShape::kRow << "x" << FilterShape::kColumn << "). "
<< "To use the filter shape passed in, change the FilterShape template "
<< "parameter and recompile this example."
<< std::endl;
return false;
}
return true;
}
/// Updates input and filter sizes
void update(cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
cmd.get_cmd_line_argument("g", groups);
filter_size.c() = 1;
filter_size.n() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("splitk", splitk);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
int32_t padding_h = filter_size.h() / 2;
int32_t padding_w = filter_size.w() / 2;
padding = {padding_h, padding_h, padding_w, padding_w};
}
/// Prints the usage statement.
std::ostream &print_usage(std::ostream &out) const {
out << "46_depthwise_gemm_fprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --g=<int> Groups\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --splitk=<int> Enable splitK\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results "
"table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/46_depthwise_simt_conv2dfprop/46_depthwise_simt_conv2dfprop --n=32 "
"--h=224 --w=224 --c=128 --k=128 --g=128 --r=3 --s=3\n\n"
<< "$ ./examples/46_depthwise_simt_conv2dfprop/46_depthwise_simt_conv2dfprop --n=1 "
"--h=224 --w=224 --c=32 --k=32 --g=32 --r=3 --s=3 --splitk=10 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas =
output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result()
: runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) {}
static std::ostream &print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,G,stride_h,stride_w,dilation_h,dilation_w,splitK,Runtime,GFLOPs";
return out;
}
std::ostream &print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
cutlass::Tensor4DCoord output_size = options.output_size();
out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << ","
<< options.input_size.w() << "," << options.input_size.c() << ","
<< options.filter_size.n() << "," << options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.groups << "," << options.conv_stride.row() << "," << options.conv_stride.column()
<< ","
<< options.dilation.row() << "," << options.dilation.column() << ","
<< options.splitk << ","
<< runtime_ms << "," << gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one testcase
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b_transpose(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(), 1, ElementInputA(5), ElementInputA(-6), 0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(), 1, ElementInputB(3), ElementInputB(-6), 0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(), 1, ElementOutput(5), ElementOutput(-6), 0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_b_transpose.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
  // Split P*Q into multiple CTAs
int split_k_slices = options.splitk;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices,
options.groups);
  // Construct Direct2dConv::Arguments structure with conv2d
// problem size, data pointers, and epilogue values
typename Direct2dConv::Arguments arguments{problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
tensor_b_transpose.device_ref()};
//
// Initialize CUTLASS Convolution
//
Direct2dConv implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator >(problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_d.host_ref(),
options.alpha,
options.beta);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
bool passed =
cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
} else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
} else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "46_depthwise_simt_conv2dfprop" << options.input_size.n() << "x" << options.input_size.h()
<< "x" << options.input_size.w() << "x" << options.input_size.c() << "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x"
<< options.filter_size.w() << "x" << options.filter_size.c() << ".dat";
std::ofstream output_workspace(ss.str());
output_workspace << "Input = \n"
<< tensor_a.host_view() << "\n\n"
<< "Filters = \n"
<< tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto &event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error)
<< std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 6)) {
std::cerr << "Run on a machine with compute capability at least 60." << std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu/0 | {
"file_path": "examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu",
"repo_id": "examples",
"token_count": 9564
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/numeric/math.hpp"
namespace example
{
// Naive grid-stride loop implementation of gather
template<typename Element, typename Func>
__global__ void
gather_kernel(Element const * __restrict__ input,
Element * __restrict__ output,
Func func,
int num_elems_input,
int num_elems_output,
cutlass::FastDivmod stride_divmod)
{
Element const * input_b = input + blockIdx.z * num_elems_input;
Element * output_b = output + blockIdx.z * num_elems_output;
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
for (int k = tidx; k < num_elems_output; k += blockDim.x * gridDim.x) {
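// Decompose the linear output index k into (j = k / stride, i = k % stride); the strided
// coordinate j is then remapped through func to select the source column.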
int i,j;
stride_divmod(j, i, k);
output_b[k] = input_b[i + func(j) * stride_divmod.divisor];
}
}
// Gather elements along the strided dimension of the tensor according to the given indices
template<typename Element, typename Func>
void
gather(Element const * input,
Element * output,
Func func,
int batch_size,
int num_elems_input,
int num_elems_output,
int stride,
cutlass::KernelHardwareInfo const& hw_info)
{
// Upcast to uint128_t data type
int factor = 128 / cutlass::sizeof_bits<Element>::value;
assert(stride % factor == 0);
int stride_upcast = stride/factor;
int num_elems_input_upcast = num_elems_input / factor;
int num_elems_output_upcast = num_elems_output / factor;
cutlass::FastDivmod stride_divmod(stride_upcast);
dim3 blocks(hw_info.sm_count, 1, batch_size);
gather_kernel<<<blocks, 1024>>>(reinterpret_cast<cute::uint128_t const *>(input),
reinterpret_cast<cute::uint128_t *>(output),
func,
num_elems_input_upcast,
num_elems_output_upcast,
stride_divmod);
}
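// Illustrative sketch (not part of the original example) of how the Func argument might be
// defined; the functor name and the index-array layout are assumptions for demonstration only.
struct IndexedGatherExample {
  int const* indices; // maps strided coordinate j -> source coordinate indices[j]
  __host__ __device__ int operator()(int j) const { return indices[j]; }
};
// A call for a column-major [stride x num_cols] batch could then look like:
//   gather(d_in, d_out, IndexedGatherExample{d_idx}, batch_size,
//          stride * cols_in, stride * cols_out, stride, hw_info);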
// Naive grid-stride loop implementation of scatter
template<typename Element, typename Func>
__global__ void
scatter_kernel(Element const * __restrict__ input,
Element * __restrict__ output,
Func func,
int num_elems_input,
int num_elems_output,
cutlass::FastDivmod stride_divmod)
{
Element const * input_b = input + blockIdx.z * num_elems_input;
Element * output_b = output + blockIdx.z * num_elems_output;
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
for (int k = tidx; k < num_elems_input; k += blockDim.x * gridDim.x) {
int i,j;
stride_divmod(j, i, k);
output_b[i + func(j) * stride_divmod.divisor] = input_b[k];
}
}
// Scatter elements along the strided dimension of the tensor according to the given indices
template<typename Element, typename Func>
void
scatter(Element const * input,
Element * output,
Func func,
int batch_size,
int num_elems_input,
int num_elems_output,
int stride,
cutlass::KernelHardwareInfo const& hw_info)
{
// Upcast to uint128_t data type
int factor = 128 / cutlass::sizeof_bits<Element>::value;
assert(stride % factor == 0);
int stride_upcast = stride/factor;
int num_elems_input_upcast = num_elems_input / factor;
int num_elems_output_upcast = num_elems_output / factor;
cutlass::FastDivmod stride_divmod(stride_upcast);
dim3 blocks(hw_info.sm_count, 1, batch_size);
scatter_kernel<<<blocks, 1024>>>(reinterpret_cast<cute::uint128_t const *>(input),
reinterpret_cast<cute::uint128_t *>(output),
func,
num_elems_input_upcast,
num_elems_output_upcast,
stride_divmod);
}
} // namespace example
| examples/52_hopper_gather_scatter_fusion/gather_kernel.cuh/0 | {
"file_path": "examples/52_hopper_gather_scatter_fusion/gather_kernel.cuh",
"repo_id": "examples",
"token_count": 2200
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
template <class ProblemShape, class CtaTiler,
class TA, class AStride, class ASmemLayout, class TiledCopyA,
class TB, class BStride, class BSmemLayout, class TiledCopyB,
class TC, class CStride, class CSmemLayout, class TiledMma,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(TiledMma{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, AStride dA, ASmemLayout sA_layout, TiledCopyA copy_a,
TB const* B, BStride dB, BSmemLayout sB_layout, TiledCopyB copy_b,
TC * C, CStride dC, CSmemLayout , TiledMma mma,
Alpha alpha, Beta beta)
{
using namespace cute;
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
CUTE_STATIC_ASSERT_V(size(copy_a) == size(mma)); // NumThreads
CUTE_STATIC_ASSERT_V(size(copy_b) == size(mma)); // NumThreads
static_assert(is_static<ASmemLayout>::value);
static_assert(is_static<BSmemLayout>::value);
static_assert(is_static<CSmemLayout>::value);
CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK
CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K)
Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K)
Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory buffers
__shared__ TA smemA[cosize_v<ASmemLayout>];
__shared__ TB smemB[cosize_v<BSmemLayout>];
Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K)
Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K)
//
// Partition the copying of A and B tiles across the threads
//
// TUTORIAL: Example of partitioning via a TiledCopy
ThrCopy thr_copy_a = copy_a.get_slice(threadIdx.x);
Tensor tAgA = thr_copy_a.partition_S(gA); // (CPY,CPY_M,CPY_K,k)
Tensor tAsA = thr_copy_a.partition_D(sA); // (CPY,CPY_M,CPY_K)
Tensor tArA = make_fragment_like(tAsA); // (CPY,CPY_M,CPY_K)
ThrCopy thr_copy_b = copy_b.get_slice(threadIdx.x);
Tensor tBgB = thr_copy_b.partition_S(gB); // (CPY,CPY_N,CPY_K,k)
Tensor tBsB = thr_copy_b.partition_D(sB); // (CPY,CPY_N,CPY_K)
Tensor tBrB = make_fragment_like(tBsB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tArA)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tAsA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<2>(tAgA) == size<2>(tArA)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBrB)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBsB)); // CPY_K
CUTE_STATIC_ASSERT_V(size<2>(tBgB) == size<2>(tBrB)); // CPY_K
// Copy gmem to rmem for k_tile=0
copy(copy_a, tAgA(_,_,_,0), tArA);
copy(copy_b, tBgB(_,_,_,0), tBrB);
//
// Define A/B partitioning and C accumulators
//
// TUTORIAL: Example of partitioning via a TiledMMA
ThrMMA thr_mma = mma.get_slice(threadIdx.x);
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K)
Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N)
// Allocate registers for pipelining
Tensor tCrA = thr_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K)
// Allocate the accumulators -- same size as the projected data
Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V( shape(tCrA) == shape(tCsA)); // (MMA,MMA_M,MMA_K)
CUTE_STATIC_ASSERT_V( shape(tCrB) == shape(tCsB)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V( shape(tCrC) == shape(tCgC)); // (MMA,MMA_M,MMA_N)
CUTE_STATIC_ASSERT_V(size<1>(tCgC) == size<1>(tCsA)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(tCgC) == size<1>(tCsB)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // MMA_K
// Clear the accumulators
clear(tCrC);
#if 0
if(thread0()) {
print(" mA : "); print( mA); print("\n");
print(" gA : "); print( gA); print("\n");
print(" sA : "); print( sA); print("\n");
print("tAgA : "); print(tAgA); print("\n");
print("tAsA : "); print(tAsA); print("\n");
print("tArA : "); print(tArA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mB : "); print( mB); print("\n");
print(" gB : "); print( gB); print("\n");
print(" sB : "); print( sB); print("\n");
print("tBgB : "); print(tBgB); print("\n");
print("tBsB : "); print(tBsB); print("\n");
print("tArA : "); print(tArA); print("\n");
}
#endif
#if 0
if(thread0()) {
print(" mC : "); print( mC); print("\n");
print(" gC : "); print( gC); print("\n");
print("tCsA : "); print(tCsA); print("\n");
print("tCsB : "); print(tCsB); print("\n");
print("tCgC : "); print(tCgC); print("\n");
print("tCrC : "); print(tCrC); print("\n");
}
#endif
#if 1
// Copy rmem to smem
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
//
// PIPELINED MAIN LOOP
// TUTORIAL: Example of a gemm loop that pipelines shared memory AND register memory
// Data is read from global to registers, then to shared via the tA|tB partitions
// Data is then copied from shared to registers in multiple waves via the tC partitions
// and gemm(.) operates on the current register wave
//
// Load A, B shmem->regs for k_block=0
copy(tCsA(_,_,0), tCrA(_,_,0));
copy(tCsB(_,_,0), tCrB(_,_,0));
auto K_TILE_MAX = size<3>(tAgA);
auto K_BLOCK_MAX = size<2>(tCrA);
CUTE_NO_UNROLL
for (int k_tile = 0; k_tile < K_TILE_MAX; ++k_tile)
{
// Pipeline the k-mode of the block registers
CUTE_UNROLL
for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Copy rmem to smem
__syncthreads();
copy(tArA, tAsA);
copy(tBrB, tBsB);
__syncthreads();
}
// Copy smem to rmem for k_block+1
int k_block_next = (k_block + 1) % K_BLOCK_MAX;
copy(tCsA(_,_,k_block_next), tCrA(_,_,k_block_next));
copy(tCsB(_,_,k_block_next), tCrB(_,_,k_block_next));
if (k_block == 0)
{
// Copy gmem to rmem for k_tile+1
int k_tile_next = (k_tile + 1 < K_TILE_MAX) ? k_tile + 1 : k_tile;
copy(copy_a, tAgA(_,_,_,k_tile_next), tArA);
copy(copy_b, tBgB(_,_,_,k_tile_next), tBrB);
}
// Thread-level register gemm for k_block
gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
} // k_block
} // k_tile
#endif
//
// Epilogue
//
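// Write back the accumulators: gC = alpha * acc + beta * gC, from registers directly to gmem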
axpby(alpha, tCrC, beta, tCgC);
}
// Setup params for a NT GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define NT strides (mixed)
auto dA = make_stride(Int<1>{}, ldA); // (dM, dK)
auto dB = make_stride(Int<1>{}, ldB); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
// Define the smem layouts (static)
auto sA = make_layout(make_shape(bM, bK)); // (m,k) -> smem_idx; m-major
auto sB = make_layout(make_shape(bN, bK)); // (n,k) -> smem_idx; n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major
// Define the thread layouts (static)
TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TA>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 m-major
Layout<Shape< _4,_1>>{}); // Val layout 4x1 m-major
TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<uint128_t>, TB>{},
Layout<Shape<_32,_8>>{}, // Thr layout 32x8 n-major
Layout<Shape< _4,_1>>{}); // Val layout 4x1 n-major
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
// Setup params for a TN GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
using namespace cute;
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define TN strides (mixed)
auto dA = make_stride(ldA, Int<1>{}); // (dM, dK)
auto dB = make_stride(ldB, Int<1>{}); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 8>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
// Define the smem layouts (static)
auto sA = make_layout(make_shape ( bM, bK),
make_stride(Int<1>{}, bM+Int<1>{})); // (m,k) -> smem_idx; padded m-major
auto sB = make_layout(make_shape ( bN, bK),
make_stride(Int<1>{}, bN+Int<1>{})); // (n,k) -> smem_idx; padded n-major
auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx
// Define the thread layouts (static)
TiledCopy copyA = make_tiled_copy(Copy_Atom<UniversalCopy<TA>, TA>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledCopy copyB = make_tiled_copy(Copy_Atom<UniversalCopy<TB>, TB>{},
Layout<Shape<_32,_8>,Stride<_8,_1>>{}, // Thr layout 32x8 k-major
Layout<Shape< _1,_1>>{}); // Val layout 1x1
TiledMMA mmaC = make_tiled_mma(UniversalFMA<TC,TA,TB>{},
Layout<Shape<_16,_16,_1>>{}); // 16x16x1 TiledMMA
#if 0
print(copyA);
print(copyB);
print(mmaC);
#endif
#if 0
print_latex(copyA);
print_latex(copyB);
print_latex(mmaC);
#endif
dim3 dimBlock(size(mmaC));
dim3 dimGrid(size(ceil_div(M, bM)),
size(ceil_div(N, bN)));
gemm_device<<<dimGrid, dimBlock, 0, stream>>>
(prob_shape, cta_tiler,
A, dA, sA, copyA,
B, dB, sB, copyB,
C, dC, sC, mmaC,
alpha, beta);
}
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
if (transA == 'N' && transB == 'T') {
return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
} else
if (transA == 'T' && transB == 'N') {
return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
}
assert(false && "Not implemented");
}
int main(int argc, char** argv)
{
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 7) {
std::cout << "This example requires an Volta GPU or newer (CC >= 70)" << std::endl;
// Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits.
return 0;
}
int m = 5120;
if (argc >= 2)
sscanf(argv[1], "%d", &m);
int n = 5120;
if (argc >= 3)
sscanf(argv[2], "%d", &n);
int k = 4096;
if (argc >= 4)
sscanf(argv[3], "%d", &k);
char transA = 'N';
if (argc >= 5)
sscanf(argv[4], "%c", &transA);
char transB = 'T';
if (argc >= 6)
sscanf(argv[5], "%c", &transB);
using TA = float;
using TB = float;
using TC = float;
using TI = float;
TI alpha = 1.0;
TI beta = 0.0;
std::cout << "M = " << m << std::endl;
std::cout << "N = " << n << std::endl;
std::cout << "K = " << k << std::endl;
std::cout << "C = A^" << transA << " B^" << transB << std::endl;
thrust::host_vector<TA> h_A(m*k);
thrust::host_vector<TB> h_B(n*k);
thrust::host_vector<TC> h_C(m*n);
for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 );
for (int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1);
thrust::device_vector<TA> d_A = h_A;
thrust::device_vector<TB> d_B = h_B;
thrust::device_vector<TC> d_C = h_C;
double gflops = (2.0*m*n*k) * 1e-9;
const int timing_iterations = 100;
GPU_Clock timer;
int ldA = 0, ldB = 0, ldC = m;
if (transA == 'N') {
ldA = m;
} else if (transA == 'T') {
ldA = k;
} else {
assert(false);
}
if (transB == 'N') {
ldB = k;
} else if (transB == 'T') {
ldB = n;
} else {
assert(false);
}
// Run once
d_C = h_C;
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
CUTE_CHECK_LAST();
thrust::host_vector<TC> cute_result = d_C;
// Timing iterations
timer.start();
for (int i = 0; i < timing_iterations; ++i) {
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
}
double cute_time = timer.seconds() / timing_iterations;
CUTE_CHECK_LAST();
printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
return 0;
}
| examples/cute/tutorial/sgemm_sm70.cu/0 | {
"file_path": "examples/cute/tutorial/sgemm_sm70.cu",
"repo_id": "examples",
"token_count": 9373
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/tensor.hpp>
#include <cute/atom/mma_atom.hpp>
/** The gemm algorithm takes four (or three) tensors and computes
* D = A * B + C
* It dispatches based on the number of modes each tensor has:
*
* 1. `(V) x (V) => (V)`.
* The element-wise product of vectors. Dispatches to FMA or MMA.
* 2. `(M) x (N) => (M,N)`.
* The outer product of vectors. Dispatches to [3] with new mode K=(1).
* 3. `(M,K) x (N,K) => (M,N)`.
* The product of matrices. Dispatches to [5] with MMA vector-mode V.
* 4. `(V,M) x (V,N) => (V,M,N)`.
* The batched outer product of vectors. Accounts for register reuse and dispatches to [1] for each (m,n).
* 5. `(V,M,K) x (V,N,K) => (V,M,N)`.
* The batched product of matrices. Dispatches to [4] for each (k).
*/
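// For instance (illustrative note): the thread-level tutorial call
//   gemm(mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC);
// presents (V,M) x (V,N) => (V,M,N) register fragments and so resolves to case [4] above,
// while passing full (V,M,K)/(V,N,K) fragments resolves to case [5].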
namespace cute
{
//
// Three arguments to four
//
template <class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> & C)
{
return gemm(C, A, B, C);
}
template <class MMA,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> & C)
{
return gemm(mma, C, A, B, C);
}
//
// Accept mutable temporaries
//
template <class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> && C)
{
return gemm(C, A, B, C);
}
template <class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TD, DLayout> && D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
return gemm(D, A, B, C);
}
template <class MMA,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> && C)
{
return gemm(mma, C, A, B, C);
}
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> && D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
return gemm(mma, D, A, B, C);
}
//
// Default MMA is UniversalFMA
//
template <class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE
void
gemm(Tensor<TD, DLayout> & D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
using MMA = MMA_Atom<UniversalFMA<typename Tensor<TD,DLayout>::value_type,
typename Tensor<TA,ALayout>::value_type,
typename Tensor<TB,BLayout>::value_type,
typename Tensor<TC,CLayout>::value_type>>;
return gemm(MMA{}, D, A, B, C);
}
//
// Thread-Local Register-Memory GEMMs
//
// Dispatch [1]: (V) x (V) => (V)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 1 && is_rmem<TD>::value &&
ALayout::rank == 1 && is_rmem<TA>::value &&
BLayout::rank == 1 && is_rmem<TB>::value &&
CLayout::rank == 1 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V) Logical data
Tensor<TA, ALayout> const& A, // (V) Logical data
Tensor<TB, BLayout> const& B, // (V) Logical data
Tensor<TC, CLayout> const& C) // (V) Logical data
{
// No static assertions on (V), MMA checks compatibility
mma.call(D, A, B, C);
}
// Dispatch [2]: (M) x (N) => (M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
ALayout::rank == 1 && is_rmem<TA>::value &&
BLayout::rank == 1 && is_rmem<TB>::value &&
CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (M,N) Logical data
Tensor<TA, ALayout> const& A, // (M) Logical data
Tensor<TB, BLayout> const& B, // (N) Logical data
Tensor<TC, CLayout> const& C) // (M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
gemm(mma,
D, // (M,N)
make_tensor(A.data(), append<2>(A.layout())), // (M,1)
make_tensor(B.data(), append<2>(B.layout())), // (N,1)
C); // (M,N)
}
// Dispatch [3]: (M,K) x (N,K) => (M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
ALayout::rank == 2 && is_rmem<TA>::value &&
BLayout::rank == 2 && is_rmem<TB>::value &&
CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (M,N) Logical data
Tensor<TA, ALayout> const& A, // (M,K) Logical data
Tensor<TB, BLayout> const& B, // (N,K) Logical data
Tensor<TC, CLayout> const& C) // (M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
// Assert this is a 1-value MMA
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutC_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutA_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutB_TV{}) == Int<1>{});
gemm(mma,
make_tensor(D.data(), prepend<3>(D.layout())), // (1,M,N)
make_tensor(A.data(), prepend<3>(A.layout())), // (1,M,K)
make_tensor(B.data(), prepend<3>(B.layout())), // (1,N,K)
make_tensor(C.data(), prepend<3>(C.layout()))); // (1,M,N)
}
// Dispatch [4]: (V,M) x (V,N) => (V,M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
ALayout::rank == 2 && is_rmem<TA>::value &&
BLayout::rank == 2 && is_rmem<TB>::value &&
CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V,M,N) Logical data
Tensor<TA, ALayout> const& A, // (V,M) Logical data
Tensor<TB, BLayout> const& B, // (V,N) Logical data
Tensor<TC, CLayout> const& C) // (V,M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
auto M = size<1>(A);
auto N = size<1>(B);
// REGISTER .reuse OPTIMIZATIONS
// 64-bit traversal specialization -- serpentine path
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 8 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 8)
{
#if 1 // NOTE: Row- vs Col- major could depend on the C-matrix order... (which we can test)
// Row-major serpentine iteration
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
int ns = (m & 1) ? N-1-n : n; // Serpentine coordinate
gemm(mma, D(_,m,ns), A(_,m), B(_,ns), C(_,m,ns));
}
}
#else
// Col-major serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
int ms = (n & 1) ? M-1-m : m; // Serpentine coordinate
gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
}
}
#endif
} else
// 32-bit traversal specialization -- kinked serpentine path
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 4 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 4)
{
#if 1 // NOTE: Row- vs Col- major could depend on the C-matrix order... (which we can test)
// Row-major kinked serpentine iteration
CUTE_UNROLL
for (int m = 0; m < M; m += 2) {
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
int ns = (m & 2) ? N-1-n : n;
gemm(mma, D(_,m+0,ns), A(_,m+0), B(_,ns), C(_,m+0,ns));
if (m+1 < M) {
gemm(mma, D(_,m+1,ns), A(_,m+1), B(_,ns), C(_,m+1,ns));
}
}
}
#else
// Col-major kinked serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; n += 2) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
// Kinked serpentine traversal for maximum register reuse
int ms = (n & 2) ? M-1-m : m;
gemm(mma, D(_,ms,n+0), A(_,ms), B(_,n+0), C(_,ms,n+0));
if (n+1 < N) {
gemm(mma, D(_,ms,n+1), A(_,ms), B(_,n+1), C(_,ms,n+1));
}
}
}
#endif
} else
// 64-bit + 32-bit traversal order -- keep A (64-bit) in the outer loop and serpentine B
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 8 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 4) {
// Row-major serpentine iteration
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
int ns = (m & 1) ? N-1-n : n; // Serpentine coordinate
gemm(mma, D(_,m,ns), A(_,m), B(_,ns), C(_,m,ns));
}
}
} else
// 32-bit + 64-bit traversal order -- keep B (64-bit) in the outer loop and serpentine A
if constexpr (decltype(size<0>(A))::value * sizeof(typename TA::value_type) == 4 &&
decltype(size<0>(B))::value * sizeof(typename TB::value_type) == 8) {
// Col-major serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
int ms = (n & 1) ? M-1-m : m; // Serpentine coordinate
gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
}
}
} else
// Fallback to serpentine loop
{
// Col-major serpentine iteration
CUTE_UNROLL
for (int n = 0; n < N; ++n) {
CUTE_UNROLL
for (int m = 0; m < M; ++m) {
int ms = (n & 1) ? M-1-m : m; // Serpentine coordinate
gemm(mma, D(_,ms,n), A(_,ms), B(_,n), C(_,ms,n));
}
}
}
}
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
ALayout::rank == 3 && is_rmem<TA>::value &&
BLayout::rank == 3 && is_rmem<TB>::value &&
CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V,M,N) Logical data
Tensor<TA, ALayout> const& A, // (V,M,K) Logical data
Tensor<TB, BLayout> const& B, // (V,N,K) Logical data
Tensor<TC, CLayout> const& C) // (V,M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<2>(A) == size<2>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
auto K = size<2>(A);
CUTE_UNROLL
for (int k = 0; k < K; ++k) {
gemm(mma, D, A(_,_,k), B(_,_,k), C);
}
}
//
// Thread-Local Shared-Memory GEMMs
//
// Dispatch [1]: (V) x (V) => (V)
// Dispatch [2]: (M) x (N) => (M,N)
// Dispatch [3]: (M,K) x (N,K) => (M,N)
// Dispatch [4]: (V,M) x (V,N) => (V,M,N)
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
// Dispatch [3]: (M,K) x (N,K) => (M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 2 && is_rmem<TD>::value &&
ALayout::rank == 2 && is_smem<TA>::value &&
BLayout::rank == 2 && is_smem<TB>::value &&
CLayout::rank == 2 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (M,N) Logical data
Tensor<TA, ALayout> const& A, // (M,K) Logical data
Tensor<TB, BLayout> const& B, // (N,K) Logical data
Tensor<TC, CLayout> const& C) // (M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<0>(A) == size<0>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<0>(B) == size<1>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D));
// Assert this is a 1-value MMA
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutC_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutA_TV{}) == Int<1>{});
CUTE_STATIC_ASSERT_V(size<1>(typename MMA_Atom<MMA>::LayoutB_TV{}) == Int<1>{});
gemm(mma,
make_tensor(D.data(), prepend<3>(D.layout())), // (1,M,N)
make_tensor(A.data(), prepend<3>(A.layout())), // (1,M,K)
make_tensor(B.data(), prepend<3>(B.layout())), // (1,N,K)
make_tensor(C.data(), prepend<3>(C.layout()))); // (1,M,N)
}
// Dispatch [5]: (V,M,K) x (V,N,K) => (V,M,N)
template <class MMA,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout,
__CUTE_REQUIRES(DLayout::rank == 3 && is_rmem<TD>::value &&
ALayout::rank == 3 && is_smem<TA>::value &&
BLayout::rank == 3 && is_smem<TB>::value &&
CLayout::rank == 3 && is_rmem<TC>::value)>
CUTE_HOST_DEVICE
void
gemm(MMA_Atom<MMA> const& mma,
Tensor<TD, DLayout> & D, // (V,M,N) Logical data
Tensor<TA, ALayout> const& A, // (V,M,K) Logical data
Tensor<TB, BLayout> const& B, // (V,N,K) Logical data
Tensor<TC, CLayout> const& C) // (V,M,N) Logical data
{
CUTE_STATIC_ASSERT_V(size<1>(A) == size<1>(C)); // AM == CM
CUTE_STATIC_ASSERT_V(size<1>(B) == size<2>(C)); // BN == CN
CUTE_STATIC_ASSERT_V(size<2>(A) == size<2>(B)); // AK == BK
CUTE_STATIC_ASSERT_V(size<0>(C) == size<0>(D) && size<1>(C) == size<1>(D) && size<2>(C) == size<2>(D));
auto rA = MMA_Atom<MMA>::make_fragment_A(A);
auto rB = MMA_Atom<MMA>::make_fragment_B(B);
auto K = size<2>(A);
CUTE_UNROLL
for (int k = 0; k < K; ++k)
{
copy(A(_,_,k), rA(_,_,k));
copy(B(_,_,k), rB(_,_,k));
// Thread-level register gemm for k
gemm(mma, D, rA(_,_,k), rB(_,_,k), C);
}
}
} // end namespace cute
| include/cute/algorithm/gemm.hpp/0 | {
"file_path": "include/cute/algorithm/gemm.hpp",
"repo_id": "include",
"token_count": 9055
} | 17 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
// Config
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))
# define CUTE_ARCH_MMA_SM75_SUPPORTED
# if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750))
# define CUTE_ARCH_MMA_SM75_ENABLED
# endif
#endif
namespace cute
{
//
// SM75 MMA 1688 F16F16F32
//
struct SM75_16x8x8_F32F16F16F32_TN
{
using DRegisters = float[4];
using ARegisters = uint32_t[2];
using BRegisters = uint32_t[1];
using CRegisters = float[4];
// Register asm fma
CUTE_HOST_DEVICE static void
fma(float & d0, float & d1, float & d2, float & d3,
uint32_t const& a0, uint32_t const& a1,
uint32_t const& b0,
float const& c0, float const& c1, float const& c2, float const& c3)
{
#if defined(CUTE_ARCH_MMA_SM75_ENABLED)
asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32"
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
: "r"(a0), "r"(a1),
"r"(b0),
"f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM75_16x8x8_F32F16F16F32_TN without CUTE_ARCH_MMA_SM75_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// SM75 MMA 8816 S8S8S32
//
struct SM75_8x8x16_S32S8S8S32_TN
{
using DRegisters = uint32_t[2];
using ARegisters = uint32_t[1];
using BRegisters = uint32_t[1];
using CRegisters = uint32_t[2];
// Register asm fma
CUTE_HOST_DEVICE static void
fma(uint32_t & d0, uint32_t & d1,
uint32_t const& a0,
uint32_t const& b0,
uint32_t const& c0, uint32_t const& c1)
{
#if defined(CUTE_ARCH_MMA_SM75_ENABLED)
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32"
"{%0, %1},"
"{%2},"
"{%3},"
"{%4, %5};\n"
: "=r"(d0), "=r"(d1)
: "r"(a0),
"r"(b0),
"r"(c0), "r"(c1));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM75_8x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM75_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| include/cute/arch/mma_sm75.hpp/0 | {
"file_path": "include/cute/arch/mma_sm75.hpp",
"repo_id": "include",
"token_count": 1728
} | 18 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/mma.hpp>
#include <cute/tensor.hpp>
namespace cute
{
namespace detail {
template <class X, class = void>
struct supports_output_scaling { static constexpr bool value = false; };
template <class X>
struct supports_output_scaling<X, void_t<decltype(declval<X>().accumulate_)>> { static constexpr bool value = true; };
} // end namespace detail
/**
* concept MMA_Traits
* {
 * using ValTypeD = // Logical D-value type
 * using ValTypeA = // Logical A-value type
 * using ValTypeB = // Logical B-value type
 * using ValTypeC = // Logical C-value type (NOTE: assumed == ValTypeD)
 *
 * using FrgTypeA = // A-type consumed by MMA (if omitted, same as ValTypeA)
 * using FrgTypeB = // B-type consumed by MMA (if omitted, same as ValTypeB)
 * using FrgTypeC = // C-type consumed by MMA (if omitted, same as ValTypeC)
*
* using Shape_MNK = // Logical MxNxK shape of the MMA
*
* using ThrID = // Logical thread id (tid) -> tidx
*
* using ALayout = // (Logical thread id (tid), Logical value id (vid)) -> Flat MK-coord
* using BLayout = // (Logical thread id (tid), Logical value id (vid)) -> Flat NK-coord
* using CLayout = // (Logical thread id (tid), Logical value id (vid)) -> Flat MN-coord
* };
*/
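// The MMA_Traits<UniversalFMA<...>> specialization further below is the simplest concrete model
// of this concept: a 1x1x1 MMA executed by a single thread, with size-1 TV-layouts throughout.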
template <class MMAOperation, class... MMAOpArgs>
struct MMA_Traits
{
static_assert(sizeof(MMAOperation) == 0, "MMA_Traits not implemented for this MMA_Operation.");
};
template <class D, class A, class B, class C>
struct MMA_Traits<UniversalFMA<D,A,B,C>>
{
using ValTypeD = D;
using ValTypeA = A;
using ValTypeB = B;
using ValTypeC = C;
// Logical shape of the MMA
using Shape_MNK = Shape<_1,_1,_1>;
// Logical thread id (tid) -> tidx
using ThrID = Layout<_1>;
// (Logical thread id (tid), Logical value id (vid)) -> coord
// (tid,vid) -> (m,k)
using ALayout = Layout<Shape<_1,_1>>;
// (tid,vid) -> (n,k)
using BLayout = Layout<Shape<_1,_1>>;
// (tid,vid) -> (m,n)
using CLayout = Layout<Shape<_1,_1>>;
};
//
// Generic mma_unpack for any MMA_Traits
//
template <class MMA_Op, class... MMA_Args,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
mma_unpack(MMA_Traits<MMA_Op, MMA_Args...> const& traits,
Tensor<TD, DLayout> & D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
static_assert(is_rmem<TD>::value, "Expected registers in MMA_Atom::call");
static_assert(is_rmem<TA>::value, "Expected registers in MMA_Atom::call");
static_assert(is_rmem<TB>::value, "Expected registers in MMA_Atom::call");
static_assert(is_rmem<TC>::value, "Expected registers in MMA_Atom::call");
// Register value types from the MMA_Operation register arrays
using RegTypeD = typename remove_extent<typename MMA_Op::DRegisters>::type;
using RegTypeA = typename remove_extent<typename MMA_Op::ARegisters>::type;
using RegTypeB = typename remove_extent<typename MMA_Op::BRegisters>::type;
using RegTypeC = typename remove_extent<typename MMA_Op::CRegisters>::type;
using MMATraits = MMA_Traits<MMA_Op, MMA_Args...>;
[[maybe_unused]] constexpr int RegNumD = extent<typename MMA_Op::DRegisters>::value;
constexpr int RegNumA = extent<typename MMA_Op::ARegisters>::value;
constexpr int RegNumB = extent<typename MMA_Op::BRegisters>::value;
constexpr int RegNumC = extent<typename MMA_Op::CRegisters>::value;
Tensor rA = recast<RegTypeA>(A);
Tensor rB = recast<RegTypeB>(B);
CUTE_STATIC_ASSERT_V(size(rA) == Int<RegNumA>{});
CUTE_STATIC_ASSERT_V(size(rB) == Int<RegNumB>{});
if constexpr (is_same<RegTypeD, void>::value)
{
static_assert(is_same<typename TD::value_type, typename TC::value_type>::value, "GMMA C and D value_type must match.");
static_assert(is_same<DLayout, CLayout>::value, "GMMA C and D layouts must match.");
// assert((void*)&C == (void*)&D);
Tensor rC = recast<RegTypeC>(D); // NOTE: D and C are same, so use mutable D
//CUTE_STATIC_ASSERT_V(size(rC) == Int<RegNumC>{});
if constexpr (detail::supports_output_scaling<MMATraits>::value) {
detail::explode(MMA_Op::fma,
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{},
&(traits.accumulate_), seq<0>{});
}
else {
detail::explode(MMA_Op::fma,
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{});
}
}
else {
Tensor rD = recast<RegTypeD>(D);
Tensor rC = recast<RegTypeC>(C);
CUTE_STATIC_ASSERT_V(size(rD) == Int<RegNumD>{});
CUTE_STATIC_ASSERT_V(size(rC) == Int<RegNumC>{});
if constexpr (detail::supports_output_scaling<MMATraits>::value) {
detail::explode(MMA_Op::fma,
rD, make_int_sequence<RegNumD>{},
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{},
&(traits.accumulate_), seq<0>{});
}
else {
detail::explode(MMA_Op::fma,
rD, make_int_sequence<RegNumD>{},
rA, make_int_sequence<RegNumA>{},
rB, make_int_sequence<RegNumB>{},
rC, make_int_sequence<RegNumC>{});
}
}
}
//
// Accept mutable temporaries
//
template <class MMA_Op, class... MMA_Args,
class TD, class DLayout,
class TA, class ALayout,
class TB, class BLayout,
class TC, class CLayout>
CUTE_HOST_DEVICE constexpr
void
mma_unpack(MMA_Traits<MMA_Op, MMA_Args...> const& traits,
Tensor<TD, DLayout> && D,
Tensor<TA, ALayout> const& A,
Tensor<TB, BLayout> const& B,
Tensor<TC, CLayout> const& C)
{
mma_unpack(traits, D, A, B, C);
}
namespace detail {
template <class X, class = void>
struct FrgTypeA_or_Default { using type = typename X::ValTypeA; };
template <class X>
struct FrgTypeA_or_Default<X,void_t<typename X::FrgTypeA>> { using type = typename X::FrgTypeA; };
template <class X, class = void>
struct FrgTypeB_or_Default { using type = typename X::ValTypeB; };
template <class X>
struct FrgTypeB_or_Default<X,void_t<typename X::FrgTypeB>> { using type = typename X::FrgTypeB; };
template <class X, class = void>
struct FrgTypeC_or_Default { using type = typename X::ValTypeC; };
template <class X>
struct FrgTypeC_or_Default<X,void_t<typename X::FrgTypeC>> { using type = typename X::FrgTypeC; };
} // end namespace detail
} // namespace cute
| include/cute/atom/mma_traits.hpp/0 | {
"file_path": "include/cute/atom/mma_traits.hpp",
"repo_id": "include",
"token_count": 3561
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/container/array.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/numeric/integral_constant.hpp>
/** IntTuple is an integer or a tuple of IntTuples.
* This file holds utilities for working with IntTuples,
* but does not hold a concrete concept or class of IntTuple.
*/
namespace cute
{
// Implementation of get<0>(Integral).
// Even though is_tuple<Integral> is false and tuple_size<Integral> doesn't compile,
// CuTe defines rank(Integral) as 1, so it's useful for get<0>(Integral) to return its input
template <size_t I, class T, __CUTE_REQUIRES(cute::is_integral<cute::remove_cvref_t<T>>::value)>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(T&& t) noexcept
{
static_assert(I == 0, "Index out of range");
return static_cast<T&&>(t);
}
// Custom recursive get for anything that implements get<I>(.) (for a single integer I).
template <size_t I0, size_t I1, size_t... Is, class T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
get(T&& t) noexcept
{
return get<I1, Is...>(get<I0>(static_cast<T&&>(t)));
}
//
// rank
//
template <int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
rank(IntTuple const& t)
{
if constexpr (sizeof...(Is) == 0) {
if constexpr (is_tuple<IntTuple>::value) {
return Int<tuple_size<IntTuple>::value>{};
} else {
return Int<1>{};
}
} else {
return rank(get<Is...>(t));
}
CUTE_GCC_UNREACHABLE;
}
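// For instance (illustrative): rank(8) == 1 and rank(make_shape(2, make_shape(3,4))) == 2,
// since rank counts only the top-level modes.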
template <class IntTuple>
using rank_t = decltype(rank(declval<IntTuple>()));
template <class IntTuple>
static constexpr int rank_v = rank_t<IntTuple>::value;
//
// shape
//
template <class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
shape(IntTuple const& s)
{
if constexpr (is_tuple<IntTuple>::value) {
return transform(s, [](auto const& a) { return shape(a); });
} else {
return s;
}
CUTE_GCC_UNREACHABLE;
}
template <int I, int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
shape(IntTuple const& s)
{
if constexpr (is_tuple<IntTuple>::value) {
return shape<Is...>(get<I>(s));
} else {
return get<I,Is...>(shape(s));
}
CUTE_GCC_UNREACHABLE;
}
//
// max
//
template <class T0, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
max(T0 const& t0, Ts const&... ts)
{
if constexpr (is_tuple<T0>::value) {
return cute::max(cute::apply(t0, [](auto const&... a){ return cute::max(a...); }), ts...);
} else if constexpr (sizeof...(Ts) == 0) {
return t0;
} else {
return cute::max(t0, cute::max(ts...));
}
CUTE_GCC_UNREACHABLE;
}
//
// min
//
template <class T0, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
min(T0 const& t0, Ts const&... ts)
{
if constexpr (is_tuple<T0>::value) {
return cute::min(cute::apply(t0, [](auto const&... a){ return cute::min(a...); }), ts...);
} else if constexpr (sizeof...(Ts) == 0) {
return t0;
} else {
return cute::min(t0, cute::min(ts...));
}
CUTE_GCC_UNREACHABLE;
}
//
// gcd
//
template <class T0, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
gcd(T0 const& t0, Ts const&... ts)
{
if constexpr (is_tuple<T0>::value) {
return cute::gcd(cute::apply(t0, [](auto const&... a){ return cute::gcd(a...); }), ts...);
} else if constexpr (sizeof...(Ts) == 0) {
return t0;
} else {
return cute::gcd(t0, cute::gcd(ts...));
}
CUTE_GCC_UNREACHABLE;
}
//
// depth
//
template <int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
depth(IntTuple const& t)
{
if constexpr (sizeof...(Is) == 0) {
if constexpr (is_tuple<IntTuple>::value) {
return Int<1>{} + cute::apply(t, [](auto const&... v){ return cute::max(depth(v)...); });
} else {
return Int<0>{};
}
} else {
return depth(get<Is...>(t));
}
CUTE_GCC_UNREACHABLE;
}
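// For instance (illustrative): depth(8) == 0, depth(make_shape(2,3)) == 1,
// and depth(make_shape(2, make_shape(3,4))) == 2.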
template <class Tuple>
using depth_t = decltype(depth(declval<Tuple>()));
template <class Tuple>
static constexpr int depth_v = depth_t<Tuple>::value;
//
// product
//
// Implementation of product as a function object
struct Product
{
template <class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
operator()(IntTuple const& a) const
{
if constexpr (is_tuple<IntTuple>::value) {
if constexpr (tuple_size<IntTuple>::value == 0) {
return Int<1>{};
} else {
return cute::transform_apply(a, Product{}, multiplies_unary_lfold{});
}
} else if constexpr (cute::is_integral<IntTuple>::value) {
return a;
}
CUTE_GCC_UNREACHABLE;
}
};
// Callable product function object
CUTE_INLINE_CONSTANT Product product;
// Return a rank(t) tuple @a result such that get<i>(@a result) = product(get<i>(@a t))
template <class Tuple>
CUTE_HOST_DEVICE constexpr
auto
product_each(Tuple const& t)
{
return transform(wrap(t), product);
}
// Take the product of Tuple at the leaves of TupleG
template <class Tuple, class TupleG>
CUTE_HOST_DEVICE constexpr
auto
product_like(Tuple const& tuple, TupleG const& guide)
{
return transform_leaf(guide, tuple, [](auto const& g, auto const& t) { return product(t); });
}
// Return the product of elements in a mode
template <int... Is, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
size(IntTuple const& a)
{
if constexpr (sizeof...(Is) == 0) {
return product(a);
} else {
return size(get<Is...>(a));
}
CUTE_GCC_UNREACHABLE;
}
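// For instance (illustrative): size(make_shape(Int<2>{}, make_shape(Int<3>{}, 4))) == 24,
// i.e. the product over all leaves of the shape.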
template <class IntTuple>
static constexpr int size_v = decltype(size(declval<IntTuple>()))::value;
//
// sum
//
template <class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
sum(IntTuple const& a)
{
if constexpr (is_tuple<IntTuple>::value) {
return cute::apply(a, [](auto const&... v){ return (Int<0>{} + ... + sum(v)); });
} else {
return a;
}
CUTE_GCC_UNREACHABLE;
}
//
// inner_product
//
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
inner_product(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
static_assert(tuple_size<IntTupleA>::value == tuple_size<IntTupleB>::value, "Mismatched ranks");
return transform_apply(a, b, [](auto const& x, auto const& y) { return inner_product(x,y); },
[](auto const&... v) { return (Int<0>{} + ... + v); });
} else {
return a * b;
}
CUTE_GCC_UNREACHABLE;
}
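// For instance (illustrative): inner_product(make_shape(2,3), make_shape(4,5)) == 2*4 + 3*5 == 23.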
//
// ceil_div
//
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
ceil_div(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value) {
if constexpr (is_tuple<IntTupleB>::value) { // tuple tuple
static_assert(tuple_size<IntTupleA>::value >= tuple_size<IntTupleB>::value, "Mismatched ranks");
constexpr int R = tuple_size<IntTupleA>::value; // Missing ranks in TupleB are implicitly 1
return transform(a, append<R>(b,Int<1>{}), [](auto const& x, auto const& y) { return ceil_div(x,y); });
} else { // tuple int
auto const [result, rest] = fold(a, cute::make_tuple(cute::make_tuple(), b),
[] (auto const& init, auto const& ai) {
return cute::make_tuple(append(get<0>(init), ceil_div(ai, get<1>(init))), ceil_div(get<1>(init), ai));
});
return result;
}
} else
if constexpr (is_tuple<IntTupleB>::value) { // int tuple
return ceil_div(a, product(b));
} else {
return (a + b - Int<1>{}) / b;
}
CUTE_GCC_UNREACHABLE;
}
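// For instance (illustrative): ceil_div(10, 4) == 3, and ceil_div(make_shape(4,6), 8) == (1,3):
// the scalar divisor is folded across the modes (4 divides into 8 leaving 2, which then divides 6).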
//
// round_up
// Round @a a up to the nearest multiple of @a b.
// For negative numbers, rounds away from zero.
//
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
round_up(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
static_assert(tuple_size<IntTupleA>::value >= tuple_size<IntTupleB>::value, "Mismatched ranks");
constexpr int R = tuple_size<IntTupleA>::value; // Missing ranks in TupleB are implicitly 1
return transform(a, append<R>(b,Int<1>{}), [](auto const& x, auto const& y) { return round_up(x,y); });
} else {
return ((a + b - Int<1>{}) / b) * b;
}
CUTE_GCC_UNREACHABLE;
}
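// For instance (illustrative): round_up(7, 4) == 8, and round_up(make_shape(10,6), make_shape(4)) == (12,6),
// since missing trailing modes of b are treated as 1.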
/** Division for Shapes
* Case Tuple Tuple:
* Perform shape_div element-wise
* Case Tuple Int:
* Fold the division of b across each element of a
* Example: shape_div((4,5,6),40) -> shape_div((1,5,6),10) -> shape_div((1,1,6),2) -> (1,1,3)
* Case Int Tuple:
* Return shape_div(a, product(b))
* Case Int Int:
* Enforce the divisibility condition a % b == 0 || b % a == 0 when possible
* Return a / b with rounding away from 0 (that is, 1 or -1 when a < b)
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
shape_div(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value) {
if constexpr (is_tuple<IntTupleB>::value) { // tuple tuple
static_assert(tuple_size<IntTupleA>::value == tuple_size<IntTupleB>::value, "Mismatched ranks");
return transform(a, b, [](auto const& x, auto const& y) { return shape_div(x,y); });
} else { // tuple int
auto const [result, rest] = fold(a, cute::make_tuple(cute::make_tuple(), b),
[] (auto const& init, auto const& ai) {
return cute::make_tuple(append(get<0>(init), shape_div(ai, get<1>(init))), shape_div(get<1>(init), ai));
});
return result;
}
} else
if constexpr (is_tuple<IntTupleB>::value) { // int tuple
return shape_div(a, product(b));
} else
if constexpr (is_static<IntTupleA>::value && is_static<IntTupleB>::value) {
static_assert(IntTupleA::value % IntTupleB::value == 0 || IntTupleB::value % IntTupleA::value == 0, "Static shape_div failure");
return C<shape_div(IntTupleA::value, IntTupleB::value)>{};
} else { // int int
//assert(a % b == 0 || b % a == 0); // Waive dynamic assertion
return a / b != 0 ? a / b : signum(a) * signum(b); // Division with rounding away from zero
}
CUTE_GCC_UNREACHABLE;
}
/** Minimum for Shapes
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
shape_min(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value || is_tuple<IntTupleB>::value) {
static_assert(dependent_false<IntTupleA>, "Not implemented.");
} else
if constexpr (is_constant<1, IntTupleA>::value || is_constant<1, IntTupleB>::value) {
return Int<1>{}; // _1 is less than all other shapes, preserve static
} else {
return cute::min(a, b);
}
CUTE_GCC_UNREACHABLE;
}
/** Return a tuple with the same profile as A, scaled by the corresponding elements of B
*/
template <class A, class B>
CUTE_HOST_DEVICE constexpr
auto
elem_scale(A const& a, B const& b)
{
if constexpr (is_tuple<A>::value) {
return transform(a, b, [](auto const& x, auto const& y) { return elem_scale(x,y); });
} else {
return a * product(b);
}
CUTE_GCC_UNREACHABLE;
}
/** Test if two IntTuples have the same profile (hierarchical rank division)
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
congruent(IntTupleA const& a, IntTupleB const& b)
{
return bool_constant<is_same<decltype(repeat_like(shape(a),_0{})),
decltype(repeat_like(shape(b),_0{}))>::value>{};
}
template <class A, class B>
using is_congruent = decltype(congruent(declval<A>(), declval<B>()));
/** Test if two IntTuples have similar profiles up to Shape A (hierarchical rank division)
* weakly_congruent is a partial order on A and B: A <= B
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
weakly_congruent(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
if constexpr (tuple_size<IntTupleA>::value != tuple_size<IntTupleB>::value) {
return false_type{};
} else {
return transform_apply(a, b, [](auto const& x, auto const& y) { return weakly_congruent(x,y); },
[](auto const&... z) { return (true_type{} && ... && z); });
}
} else if constexpr (is_integral<IntTupleA>::value) {
return true_type{};
} else if constexpr (is_integral<IntTupleB>::value) {
return false_type{};
} else {
return weakly_congruent(shape(a), shape(b));
}
CUTE_GCC_UNREACHABLE;
}
template <class A, class B>
using is_weakly_congruent = decltype(weakly_congruent(declval<A>(), declval<B>()));
/** Test if Shape A is compatible with Shape B:
* the size of A and B are the same, and
* any coordinate into A can also be used as a coordinate into B
* compatible is a partial order on A and B: A <= B
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
compatible(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
if constexpr (tuple_size<IntTupleA>::value != tuple_size<IntTupleB>::value) {
return false_type{};
} else {
return transform_apply(a, b, [](auto const& x, auto const& y) { return compatible(x,y); },
[](auto const&... z) { return (true_type{} && ... && z); });
}
} else if constexpr (is_integral<IntTupleA>::value) {
return a == size(b);
} else if constexpr (is_integral<IntTupleB>::value) {
return false_type{};
} else {
return compatible(shape(a), shape(b));
}
CUTE_GCC_UNREACHABLE;
}
template <class A, class B>
using is_compatible = decltype(compatible(declval<A>(), declval<B>()));
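// Usage sketch (illustrative, hypothetical values) contrasting congruent and compatible:
//   congruent(make_shape(2,3), make_shape(4,5));                  // true  -- same hierarchical profile
//   congruent(make_shape(2,3), make_shape(2, make_shape(3,1)));   // false -- profiles differ
//   compatible(6, make_shape(2,3));   // true  -- sizes match and an index into 6 is a valid coord for (2,3)
//   compatible(make_shape(2,3), 6);   // false -- a 2D coord cannot be used to index an integer shape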
/** Test if Shape A is weakly compatible with Shape B:
* there exists a Shape C congruent to A such that compatible(elem_scale(A,C), B)
* weakly_compatible is a partial order on A and B: A <= B
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
weakly_compatible(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
if constexpr (tuple_size<IntTupleA>::value != tuple_size<IntTupleB>::value) {
return false_type{};
} else {
return transform_apply(a, b, [](auto const& x, auto const& y) { return weakly_compatible(x,y); },
[](auto const&... z) { return (true_type{} && ... && z); });
}
} else if constexpr (is_integral<IntTupleA>::value) {
return size(b) % a == Int<0>{};
} else if constexpr (is_integral<IntTupleB>::value) {
return false_type{};
} else {
return weakly_compatible(shape(a), shape(b));
}
CUTE_GCC_UNREACHABLE;
}
template <class A, class B>
using is_weakly_compatible = decltype(weakly_compatible(declval<A>(), declval<B>()));
/** Replace the elements of Tuple B that are paired with an Int<0> with an Int<1>
*/
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value) {
return transform(a, b, [](auto const& x, auto const& y) { return filter_zeros(x,y); });
} else if constexpr (is_constant<0, IntTupleA>::value) {
return Int<1>{};
} else {
return b;
}
CUTE_GCC_UNREACHABLE;
}
template <class Tuple>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(Tuple const& t)
{
return filter_zeros(t, t);
}
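// Usage sketch (illustrative, hypothetical values): modes paired with Int<0> become Int<1>.
//   filter_zeros(make_tuple(Int<0>{}, 5), make_tuple(7, 9));   // (_1, 9)
//   filter_zeros(make_tuple(Int<0>{}, 4));                     // (_1, 4)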
//
// Converters and constructors with arrays and params
//
/** Make an IntTuple of rank N from an Indexable array.
* Access elements up to a dynamic index n, then use init (requires compatible types)
* Consider cute::take<B,E> if all indexing is known to be valid
* \code
* std::vector<int> a = {6,3,4};
 * auto tup = make_int_tuple<5>(a, a.size(), 0);  // (6,3,4,0,0)
* \endcode
*/
template <int N, class Indexable, class T>
CUTE_HOST_DEVICE constexpr
auto
make_int_tuple(Indexable const& t, int n, T const& init)
{
static_assert(N > 0);
if constexpr (N == 1) {
return 0 < n ? t[0] : init;
} else {
return transform(make_seq<N>{}, [&](auto i) { return i < n ? t[i] : init; });
}
CUTE_GCC_UNREACHABLE;
}
/** Fill the dynamic values of a Tuple with values from another Tuple
* \code
* auto params = make_tuple(6,3,4);
* cute::tuple<Int<1>, cute::tuple<int, int, Int<3>>, int, Int<2>> result;
* fill_int_tuple_from(result, params); // (_1,(6,3,_3),4,_2)
* \endcode
*/
template <class Tuple, class TupleV>
CUTE_HOST_DEVICE constexpr
auto
fill_int_tuple_from(Tuple& result, TupleV const& vals)
{
return fold(result, vals, [](auto const& init, auto&& r) {
if constexpr (is_static<remove_cvref_t<decltype(r)>>::value) { // Skip static elements of result
return init;
} else if constexpr (is_tuple<remove_cvref_t<decltype(r)>>::value) { // Recurse into tuples
return fill_int_tuple_from(r, init);
} else { // Assign and consume arg
static_assert(tuple_size<remove_cvref_t<decltype(init)>>::value > 0, "Not enough values to fill with!");
r = get<0>(init);
return remove<0>(init);
}
CUTE_GCC_UNREACHABLE;
});
}
/** Make a "Tuple" by filling in the dynamic values in order from the arguments
* \code
* using result_t = cute::tuple<Int<1>, cute::tuple<int, int, Int<3>>, int, Int<2>>;
* auto result = make_int_tuple_from<result_t>(6,3,4); // (_1,(6,3,_3),4,_2)
* \endcode
*/
template <class Tuple, class... Ts>
CUTE_HOST_DEVICE constexpr
Tuple
make_int_tuple_from(Ts const&... ts)
{
Tuple result = Tuple{};
fill_int_tuple_from(result, cute::make_tuple(ts...));
return result;
}
/** Convert a tuple to a flat homogeneous array of type T
* \code
* auto tup = cute::make_tuple(Int<1>{}, cute::make_tuple(6,3,Int<3>{}),4,Int<2>{});
* cute::array<uint64_t,6> result = to_array<uint64_t>(tup); // [1,6,3,3,4,2]
* \endcode
*/
template <class T = int64_t, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
to_array(IntTuple const& t)
{
auto flat_t = flatten_to_tuple(t);
constexpr int N = tuple_size<decltype(flat_t)>::value;
cute::array<T,N> result;
for_each(make_seq<N>{}, [&] (auto i) { result[i] = get<i>(flat_t); });
return result;
}
//
// Comparison operators
//
//
// There are many ways to compare tuple of elements and because CuTe is built
// on parameterizing layouts of coordinates, some comparisons are appropriate
// only in certain cases.
// -- lexicographical comparison [reverse, reflected, revref] : Correct for coords in RowMajor Layout
// -- colexicographical comparison [reverse, reflected, revref] : Correct for coords in ColMajor Layout
// -- element-wise comparison [any,all] :
// This can be very confusing. To avoid errors in selecting the appropriate
// comparison, op<|op<=|op>|op>= are *not* implemented for cute::tuple.
//
// When actually desiring to order coordinates, the user should map them to
// their indices within the Layout they came from:
// e.g. layoutX(coordA) < layoutX(coordB)
// That said, we implement the three most common ways to compare tuples below.
// These are implemented with slightly more explicit names than op<.
//
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
lex_less(IntTupleA const& a, IntTupleB const& b);
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
colex_less(IntTupleA const& a, IntTupleB const& b);
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
elem_less(IntTupleA const& a, IntTupleB const& b);
namespace detail {
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
lex_less_impl(TupleA const& a, TupleB const& b)
{
if constexpr (I == tuple_size<TupleB>::value) {
return cute::false_type{}; // Terminal: TupleB is exhausted
} else if constexpr (I == tuple_size<TupleA>::value) {
return cute::true_type{}; // Terminal: TupleA is exhausted, TupleB is not exhausted
} else {
return lex_less(get<I>(a), get<I>(b)) || (get<I>(a) == get<I>(b) && lex_less_impl<I+1>(a,b));
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
colex_less_impl(TupleA const& a, TupleB const& b)
{
if constexpr (I == tuple_size<TupleB>::value) {
return cute::false_type{}; // Terminal: TupleB is exhausted
} else if constexpr (I == tuple_size<TupleA>::value) {
return cute::true_type{}; // Terminal: TupleA is exhausted, TupleB is not exhausted
} else {
constexpr size_t A = tuple_size<TupleA>::value - 1 - I;
constexpr size_t B = tuple_size<TupleB>::value - 1 - I;
return colex_less(get<A>(a), get<B>(b)) || (get<A>(a) == get<B>(b) && colex_less_impl<I+1>(a,b));
}
CUTE_GCC_UNREACHABLE;
}
template <size_t I, class TupleA, class TupleB>
CUTE_HOST_DEVICE constexpr
auto
elem_less_impl(TupleA const& a, TupleB const& b)
{
if constexpr (I == tuple_size<TupleA>::value) {
return cute::true_type{}; // Terminal: TupleA is exhausted
} else if constexpr (I == tuple_size<TupleB>::value) {
return cute::false_type{}; // Terminal: TupleA is not exhausted, TupleB is exhausted
} else {
return elem_less(get<I>(a), get<I>(b)) && elem_less_impl<I+1>(a,b);
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Lexicographical comparison
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
lex_less(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
return detail::lex_less_impl<0>(a, b);
} else {
return a < b;
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
lex_leq(T const& t, U const& u) {
return !lex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
lex_gtr(T const& t, U const& u) {
return lex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
lex_geq(T const& t, U const& u) {
return !lex_less(t, u);
}
// Colexicographical comparison
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
colex_less(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
return detail::colex_less_impl<0>(a, b);
} else {
return a < b;
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
colex_leq(T const& t, U const& u) {
return !colex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
colex_gtr(T const& t, U const& u) {
return colex_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
colex_geq(T const& t, U const& u) {
return !colex_less(t, u);
}
// Elementwise [all] comparison
template <class IntTupleA, class IntTupleB>
CUTE_HOST_DEVICE constexpr
auto
elem_less(IntTupleA const& a, IntTupleB const& b)
{
if constexpr (is_tuple<IntTupleA>::value && is_tuple<IntTupleB>::value) {
return detail::elem_less_impl<0>(a, b);
} else {
return a < b;
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
elem_leq(T const& t, U const& u) {
return !elem_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
elem_gtr(T const& t, U const& u) {
return elem_less(u, t);
}
template <class T, class U>
CUTE_HOST_DEVICE constexpr
auto
elem_geq(T const& t, U const& u) {
return !elem_less(t, u);
}
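// Usage sketch (illustrative, hypothetical values) contrasting the three comparisons for a=(1,2), b=(2,1):
//   lex_less  (make_coord(1,2), make_coord(2,1));   // true  -- leftmost mode decides first
//   colex_less(make_coord(1,2), make_coord(2,1));   // false -- rightmost mode decides first
//   elem_less (make_coord(1,2), make_coord(2,1));   // false -- requires every element to be less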
namespace detail {
/** Increment a (dynamic) coord lexicographically within a shape
* @pre is_congruent<Coord,Shape>::value
* \code
* auto shape = make_shape(1,2,make_shape(2,3),3);
*
* int i = 0;
* for (auto coord = repeat_like(shape, 0); back(coord) != back(shape); increment(coord, shape)) {
* std::cout << i++ << ": " << coord << std::endl;
* }
* assert(i == size(shape));
* \endcode
*/
template <int I = 0, class Coord, class Shape>
CUTE_HOST_DEVICE constexpr
void
increment(Coord& coord, Shape const& shape)
{
if constexpr (is_integral<Coord>::value) {
++coord;
} else {
increment(get<I>(coord), get<I>(shape));
if constexpr (I+1 < tuple_size<Coord>::value) {
if (back(get<I>(coord)) == back(get<I>(shape))) {
back(get<I>(coord)) = 0;
increment<I+1>(coord, shape);
}
}
}
}
} // end namespace detail
struct ForwardCoordIteratorSentinal
{};
// A forward iterator for a starting coordinate in a shape's domain, and a shape.
// The starting coordinate may be zero but need not necessarily be.
template <class Coord, class Shape>
struct ForwardCoordIterator
{
static_assert(is_congruent<Coord, Shape>::value);
CUTE_HOST_DEVICE constexpr
Coord const& operator*() const { return coord; }
CUTE_HOST_DEVICE constexpr
ForwardCoordIterator& operator++() { detail::increment(coord, shape); return *this; }
// Sentinel for the end of the implied range
CUTE_HOST_DEVICE constexpr
bool operator< (ForwardCoordIteratorSentinal const&) const { return back(coord) < back(shape); }
CUTE_HOST_DEVICE constexpr
bool operator==(ForwardCoordIteratorSentinal const&) const { return back(coord) == back(shape); }
CUTE_HOST_DEVICE constexpr
bool operator!=(ForwardCoordIteratorSentinal const&) const { return back(coord) != back(shape); }
// NOTE: These are expensive, avoid use
CUTE_HOST_DEVICE constexpr
bool operator< (ForwardCoordIterator const& other) const { return colex_less(coord, other.coord); }
CUTE_HOST_DEVICE constexpr
bool operator==(ForwardCoordIterator const& other) const { return coord == other.coord; }
CUTE_HOST_DEVICE constexpr
bool operator!=(ForwardCoordIterator const& other) const { return coord != other.coord; }
Coord coord;
Shape const& shape;
};
// A forward iterator for a coordinate that starts from a provided coordinate
template <class Shape, class Coord>
CUTE_HOST_DEVICE constexpr
auto
make_coord_iterator(Coord const& coord, Shape const& shape)
{
return ForwardCoordIterator<Coord,Shape>{coord,shape};
}
// A forward iterator for a coordinate that starts from zero
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_coord_iterator(Shape const& shape)
{
auto coord = repeat_like(shape, int(0));
return make_coord_iterator(coord, shape);
}
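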
} // end namespace cute
// ----- end of file: include/cute/int_tuple.hpp -----
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/int_tuple.hpp>
namespace cute
{
/** crd2idx(c,s,d) maps a coordinate within <Shape,Stride> to an index
*
* This is computed as follows:
* [coord, shape, and stride are all integers => step forward by stride]
* op(c, s, d) => c * d
* [coord is integer, shape and stride are tuple => divmod coord for each mode]
* op(c, (s,S), (d,D)) => op(c % prod(s), s, d) + op(c / prod(s), (S), (D))
* [coord, shape, and stride are all tuples => consider each mode independently]
* op((c,C), (s,S), (d,D)) => op(c, s, d) + op((C), (S), (D))
*/
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord const& coord,
Shape const& shape,
Stride const& stride);
namespace detail {
template <class Coord, class Shape, class Stride, int... Is>
CUTE_HOST_DEVICE constexpr
auto
crd2idx_ttt(Coord const& coord,
Shape const& shape,
Stride const& stride, seq<Is...>)
{
return (... + crd2idx(get<Is>(coord), get<Is>(shape), get<Is>(stride)));
}
template <class CInt, class STuple, class DTuple, int I0, int... Is>
CUTE_HOST_DEVICE constexpr
auto
crd2idx_itt(CInt const& coord,
STuple const& shape,
DTuple const& stride, seq<I0,Is...>)
{
if constexpr (sizeof...(Is) == 0) { // Avoid recursion and mod on single/last iter
return crd2idx(coord, get<I0>(shape), get<I0>(stride));
} else if constexpr (is_constant<0, CInt>::value) {
return crd2idx(_0{}, get<I0>(shape), get<I0>(stride))
+ (_0{} + ... + crd2idx(_0{}, get<Is>(shape), get<Is>(stride)));
} else { // General case
return crd2idx(coord % product(get<I0>(shape)), get<I0>(shape), get<I0>(stride))
+ crd2idx_itt(coord / product(get<I0>(shape)), shape, stride, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord const& coord,
Shape const& shape,
Stride const& stride)
{
if constexpr (is_tuple<Coord>::value) {
if constexpr (is_tuple<Shape>::value) { // tuple tuple tuple
static_assert(tuple_size<Coord>::value == tuple_size< Shape>::value, "Mismatched Ranks");
static_assert(tuple_size<Coord>::value == tuple_size<Stride>::value, "Mismatched Ranks");
return detail::crd2idx_ttt(coord, shape, stride, tuple_seq<Coord>{});
} else { // tuple "int" "int"
static_assert(sizeof(Coord) == 0, "Invalid parameters");
}
} else {
if constexpr (is_tuple<Shape>::value) { // "int" tuple tuple
static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched Ranks");
return detail::crd2idx_itt(coord, shape, stride, tuple_seq<Shape>{});
} else { // "int" "int" "int"
return coord * stride;
}
}
CUTE_GCC_UNREACHABLE;
}
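// Usage sketch (illustrative, hypothetical values) for a column-major <(4,5),(1,4)> layout:
//   crd2idx(make_coord(1,2), make_shape(4,5), make_stride(1,4));   // 1*1 + 2*4 = 9
//   crd2idx(9,               make_shape(4,5), make_stride(1,4));   // 1D coord split per mode -> 9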
namespace detail {
template <class CTuple, class STuple, int I0, int... Is>
CUTE_HOST_DEVICE constexpr
auto
crd2idx_horner(CTuple const& coord,
STuple const& shape, seq<I0,Is...>)
{
if constexpr (sizeof...(Is) == 0) { // No recursion on single/last iter
return get<I0>(coord);
} else { // General case
return get<I0>(coord) + get<I0>(shape) * crd2idx_horner(coord, shape, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
/** crd2idx(c,s) maps a coordinate within Shape to an index
* via a colexicographical enumeration of coordinates in Shape.
* i = c0 + s0 * (c1 + s1 * (c2 + s2 * ...))
*/
template <class Coord, class Shape>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord const& coord,
Shape const& shape)
{
if constexpr (is_integral<Coord>::value) { // Coord is already an index
return coord;
} else if constexpr (is_integral<Shape>::value) {
static_assert(dependent_false<Shape>, "Invalid parameters");
} else { // Make congruent, flatten, and apply Horner's method
static_assert(tuple_size<Coord>::value == tuple_size<Shape>::value, "Mismatched Ranks");
auto flat_coord = flatten(coord);
auto flat_shape = flatten(product_like(shape, coord));
return detail::crd2idx_horner(flat_coord, flat_shape, tuple_seq<decltype(flat_shape)>{});
}
CUTE_GCC_UNREACHABLE;
}
/** idx2crd(i,s,d) splits an index into a coordinate within <Shape,Stride>.
*
* This is computed as follows:
* [index, shape, and stride are all integers => determine 1D coord]
* op(i, s, d) => (i / d) % s
* [index is integer, shape and stride are tuple => determine component for each mode]
* op(i, (s,S), (d,D)) => (op(i, s, d), op(i, S, D)...)
* [index, shape, and stride are all tuples => consider each mode independently]
* op((i,I), (s,S), (d,D)) => (op(i, s, d), op((I), (S), (D)))
*
* NOTE: This only works for compact shape+stride layouts. A more general version would
* apply to all surjective layouts
*/
template <class Index, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
idx2crd(Index const& idx,
Shape const& shape,
Stride const& stride)
{
if constexpr (is_tuple<Index>::value) {
if constexpr (is_tuple<Shape>::value) { // tuple tuple tuple
static_assert(tuple_size<Index>::value == tuple_size< Shape>::value, "Mismatched Ranks");
static_assert(tuple_size<Index>::value == tuple_size<Stride>::value, "Mismatched Ranks");
return transform(idx, shape, stride, [](auto const& i, auto const& s, auto const& d){ return idx2crd(i,s,d); });
} else { // tuple "int" "int"
static_assert(sizeof(Index) == 0, "Invalid parameters");
}
} else {
if constexpr (is_tuple<Shape>::value) {
if constexpr (is_tuple<Stride>::value) { // "int" tuple tuple
static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched Ranks");
return transform(shape, stride, [&](auto const& s, auto const& d){ return idx2crd(idx,s,d); });
} else { // "int" tuple "int"
return transform(shape, compact_col_major(shape, stride), [&](auto const& s, auto const& d){ return idx2crd(idx,s,d); });
}
} else { // "int" "int" "int"
if constexpr (is_constant<1, Shape>::value) {
// Skip potential stride-0 division
return Int<0>{};
} else {
return (idx / stride) % shape;
}
}
}
CUTE_GCC_UNREACHABLE;
}
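// Usage sketch (illustrative, hypothetical values), inverting the column-major example above:
//   idx2crd(9, make_shape(4,5), make_stride(1,4));   // ((9/1)%4, (9/4)%5) = (1,2)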
/** idx2crd(i,s) splits an index into a coordinate within Shape
* via a colexicographical enumeration of coordinates in Shape.
* c0 = (idx / 1) % s0
* c1 = (idx / s0) % s1
* c2 = (idx / (s0 * s1)) % s2
* ...
*/
template <class Index, class Shape>
CUTE_HOST_DEVICE constexpr
auto
idx2crd(Index const& idx,
Shape const& shape)
{
if constexpr (is_tuple<Index>::value) {
if constexpr (is_tuple<Shape>::value) { // tuple tuple
static_assert(tuple_size<Index>::value == tuple_size<Shape>::value, "Mismatched Ranks");
return transform(idx, shape, [](auto const& i, auto const& s) { return idx2crd(i,s); });
} else { // tuple "int"
static_assert(sizeof(Index) == 0, "Invalid parameters");
}
} else {
if constexpr (is_tuple<Shape>::value) { // "int" tuple
return idx2crd(idx, shape, compact_col_major(shape));
} else { // "int" "int"
return idx;
}
}
CUTE_GCC_UNREACHABLE;
}
//
// crd2crd
//
template <class Coord, class SShape, class DShape>
CUTE_HOST_DEVICE constexpr
auto
crd2crd(Coord const& coord,
SShape const& src_shape,
DShape const& dst_shape)
{
if constexpr (is_tuple<Coord>::value && is_tuple<SShape>::value && is_tuple<DShape>::value) {
static_assert(tuple_size<Coord>::value == tuple_size<SShape>::value, "Mismatched Ranks");
static_assert(tuple_size<Coord>::value == tuple_size<DShape>::value, "Mismatched Ranks");
return transform(coord, src_shape, dst_shape, [](auto const& c, auto const& s, auto const& d) { return crd2crd(c,s,d); });
} else {
// assert(size(src_shape) == size(dst_shape))
return idx2crd(crd2idx(coord, src_shape), dst_shape);
}
CUTE_GCC_UNREACHABLE;
}
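// Usage sketch (illustrative, hypothetical values): re-express a coordinate of shape (4,5) in shape (2,10).
//   crd2crd(make_coord(1,2), make_shape(4,5), make_shape(2,10));   // index 9 -> coord (1,4)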
//
// Compact Major
//
// Tags for common layouts and dispatching
struct LayoutLeft; // Col-major layout mapping; leftmost extent has stride 1
using GenColMajor = LayoutLeft; // Alias
struct LayoutRight; // Row-major layout mapping; rightmost extent has stride 1
using GenRowMajor = LayoutRight; // Alias
namespace detail {
// For GCC8.5 -- Use of lambdas in unevaluated contexts. Instead use function objects.
template <class Major>
struct CompactLambda;
// @pre is_integral<Current>
// Return (result, current * product(shape)) to enable recurrence
template <class Major, class Shape, class Current>
CUTE_HOST_DEVICE constexpr
auto
compact(Shape const& shape,
Current const& current)
{
if constexpr (is_tuple<Shape>::value) { // Shape::tuple Current::int
using Lambda = CompactLambda<Major>; // Append or Prepend
using Seq = typename Lambda::template seq<Shape>; // Seq or RSeq
return cute::detail::fold(shape, cute::make_tuple(cute::make_tuple(), current), Lambda{}, Seq{});
} else { // Shape::int Current::int
if constexpr (is_constant<1, Shape>::value) {
return cute::make_tuple(Int<0>{}, current); // If current is dynamic, this could save a reg
} else {
return cute::make_tuple(current, current * shape);
}
}
CUTE_GCC_UNREACHABLE;
}
// For GCC8.5 -- Specialization LayoutLeft
template <>
struct CompactLambda<LayoutLeft>
{
template <class Init, class Shape>
CUTE_HOST_DEVICE constexpr auto
operator()(Init const& init, Shape const& si) {
auto result = detail::compact<LayoutLeft>(si, get<1>(init));
return cute::make_tuple(append(get<0>(init), get<0>(result)), get<1>(result)); // Append
}
template <class Shape>
using seq = tuple_seq<Shape>; // Seq
};
// For GCC8.5 -- Specialization LayoutRight
template <>
struct CompactLambda<LayoutRight>
{
template <class Init, class Shape>
CUTE_HOST_DEVICE constexpr auto
operator()(Init const& init, Shape const& si) {
auto result = detail::compact<LayoutRight>(si, get<1>(init));
return cute::make_tuple(prepend(get<0>(init), get<0>(result)), get<1>(result)); // Prepend
}
template <class Shape>
using seq = tuple_rseq<Shape>; // RSeq
};
} // end namespace detail
template <class Major, class Shape, class Current = Int<1>,
__CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)>
CUTE_HOST_DEVICE constexpr
auto
compact_major(Shape const& shape,
Current const& current = {})
{
if constexpr (is_tuple<Current>::value) { // Shape::tuple Current::tuple
static_assert(is_tuple<Shape>::value, "Invalid parameters");
static_assert(tuple_size<Shape>::value == tuple_size<Current>::value, "Mismatched Ranks");
// Recurse to apply to the terminals of current
return transform(shape, current, [&](auto const& s, auto const& c){ return compact_major<Major>(s,c); });
} else {
return get<0>(detail::compact<Major>(shape, current));
}
CUTE_GCC_UNREACHABLE;
}
//
// Compact Col Major
//
struct LayoutLeft {
template <class Shape>
using Apply = decltype(compact_major<LayoutLeft>(declval<Shape>()));
};
template <class Shape, class Current = Int<1>>
CUTE_HOST_DEVICE constexpr
auto
compact_col_major(Shape const& shape,
Current const& current = {})
{
return compact_major<LayoutLeft>(shape, current);
}
//
// Compact Row Major
//
struct LayoutRight {
template <class Shape>
using Apply = decltype(compact_major<LayoutRight>(declval<Shape>()));
};
template <class Shape, class Current = Int<1>>
CUTE_HOST_DEVICE constexpr
auto
compact_row_major(Shape const& shape,
Current const& current = {})
{
return compact_major<LayoutRight>(shape, current);
}
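// Usage sketch (illustrative, hypothetical values):
//   compact_col_major(make_shape(4,5,6));   // strides (_1, 4, 20) -- leftmost mode has unit stride
//   compact_row_major(make_shape(4,5,6));   // strides (30, 6, _1) -- rightmost mode has unit stride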
//
// Compact Order -- compute a compact stride based on an ordering of the modes
//
namespace detail {
// @pre weakly_congruent(order, shape)
// @pre is_congruent<RefShape, RefOrder>
// @pre is_static<Order>
// @pre is_static<RefOrder>
template <class Shape, class Order, class RefShape, class RefOrder>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, Order const& order,
RefShape const& ref_shape, RefOrder const& ref_order)
{
if constexpr (is_tuple<Order>::value) {
static_assert(tuple_size<Shape>::value == tuple_size<Order>::value, "Need equal rank of shape and order");
return transform(shape, order, [&](auto const& s, auto const& o) { return compact_order(s, o, ref_shape, ref_order); });
} else {
// Compute the starting stride for this shape by accumulating all shapes corresponding to lesser orders
auto stride_start = product(transform(ref_shape, ref_order,
[&](auto const& s, auto const& o) {
return conditional_return(o < order, s, Int<1>{});
}));
return compact_col_major(shape, stride_start);
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class Shape, class Order>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, Order const& order)
{
auto ref_shape = flatten_to_tuple(product_like(shape, order));
auto flat_order = flatten_to_tuple(order);
// Find the largest static element of order
auto max_order = cute::fold(flat_order, Int<0>{}, [](auto v, auto order) {
if constexpr (is_constant<true, decltype(v < order)>::value) {
return order;
} else {
return v;
}
CUTE_GCC_UNREACHABLE;
});
// Replace any dynamic elements within order with large-static elements
auto max_seq = make_range<max_order+1, max_order+1+rank(flat_order)>{};
auto ref_order = cute::transform(max_seq, flat_order, [](auto seq_v, auto order) {
if constexpr (is_static<decltype(order)>::value) {
return order;
} else {
return seq_v;
}
CUTE_GCC_UNREACHABLE;
});
auto new_order = unflatten(ref_order, order);
return detail::compact_order(shape, new_order, ref_shape, ref_order);
}
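// Usage sketch (illustrative, hypothetical values): the mode with the smallest order gets the unit stride.
//   compact_order(make_shape(4,5,6), make_tuple(Int<2>{}, Int<0>{}, Int<1>{}));   // strides (30, _1, 5)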
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, GenColMajor const& major)
{
return compact_major<LayoutLeft>(shape);
}
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
compact_order(Shape const& shape, GenRowMajor const& major)
{
return compact_major<LayoutRight>(shape);
}
} // end namespace cute
// ----- end of file: include/cute/stride.hpp -----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the operation implied by MMA.
struct OpMultiplyAdd {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the result is saturated to MAX_FLOAT|MIN_FLOAT or MAX_INT|MIN_INT
struct OpMultiplyAddSaturate {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (BF16)
struct OpMultiplyAddFastBF16 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (F16)
struct OpMultiplyAddFastF16 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input data types are mixed and the narrower type is
/// upcasted to the wider type
struct OpMultiplyAddMixedInputUpcast {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every F32 output element
struct OpMultiplyAddFastF32 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every complex<F32> output element
struct OpMultiplyAddComplexFastF32 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating that staged accumulation is not to be used. This is valid only for SM89
/// FP8 kernels.
struct OpMultiplyAddFastAccum;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the complex multiply-add operation
struct OpMultiplyAddComplex {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the gaussian complex multiply-add operation
struct OpMultiplyAddGaussianComplex {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the inner product is defined by (XOR, POPC)
struct OpXorPopc {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the inner product is defined by (AND, POPC)
struct OpAndPopc {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying math operators as thread-level operations.
struct OpClassSimt {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as Tensor Core operations.
struct OpClassTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as WMMA Tensor Core operations
struct OpClassWmmaTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as Tensor Core with structure sparse operations.
struct OpClassSparseTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator
>
struct Mma;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - specialized for 1x1x1x1 matrix multiply operation
template <
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator_
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC_, LayoutC, Operator_> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = Operator_;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(
Array<ElementC, 1> &d,
Array<ElementA, 1> const &a,
Array<ElementB, 1> const &b,
Array<ElementC, 1> const &c
) {
multiply_add<ElementA, ElementB, ElementC> op;
d[0] = op(a[0], b[0], c[0]);
}
};
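// Usage sketch (illustrative; float operands are an assumption -- any types with a multiply_add
// functor work the same way):
//   cutlass::arch::Mma<cutlass::gemm::GemmShape<1,1,1>, 1,
//                      float, cutlass::layout::RowMajor,
//                      float, cutlass::layout::ColumnMajor,
//                      float, cutlass::layout::RowMajor,
//                      cutlass::arch::OpMultiplyAdd> mma;
//   cutlass::Array<float, 1> d, a, b, c;
//   a[0] = 2.0f; b[0] = 3.0f; c[0] = 1.0f;
//   mma(d, a, b, c);   // d[0] = 2*3 + 1 = 7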
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specifies internal data type for computation
struct SPFormatType {
enum Kind {
Thread
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator,
/// Specifies meta data format
SPFormatType::Kind SPFormat = SPFormatType::Thread
>
struct SparseMma;
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#include "cutlass/arch/mma_sm50.h"
#include "cutlass/arch/mma_sm60.h"
#include "cutlass/arch/mma_sm61.h"
#include "cutlass/arch/mma_sm70.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sparse_sm80.h"
#include "cutlass/arch/mma_sm89.h"
#include "cutlass/arch/mma_sparse_sm89.h"
#include "cutlass/arch/mma_sm90.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
namespace detail {
/// Helper for determining whether staged accumulation should be used for a given operator
template <typename Operator>
struct UseStagedAccumulation {
static bool const value = platform::is_same<typename Operator::MathOperator, OpMultiplyAddFastF32>::value ||
platform::is_same<typename Operator::MathOperator, OpMultiplyAddComplexFastF32>::value ||
is_sm89_staged_policy_v<Operator>;
};
} // namespace detail
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// ----- end of file: include/cutlass/arch/mma.h -----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for half
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::half_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::half_t, ///< ElementB
LayoutB_, ///< LayoutB
ElementC_, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::half_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::half_t;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm70;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for f16 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// check supported wmma output data type for the given multiplicand data types
static_assert(
platform::is_same<cutlass::half_t, ElementC>::value || platform::is_same<float, ElementC>::value,
"Supported of wmma output data type for f16 multiplicands are: f16 and f32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
  static_assert(false, "wmma.mma.sync for floating point multiplicands is available only for SM70 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
// ----- end of file: include/cutlass/arch/wmma_sm70.h -----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultConv2dFprop;
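// Usage sketch (illustrative only; the concrete types below are assumptions chosen for the example,
// not taken from this header -- any valid combination of the template parameters above follows the
// same pattern):
//   using FpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,                  // ElementA, LayoutA (activations)
//       cutlass::half_t, cutlass::layout::TensorNHWC,                  // ElementB, LayoutB (filters)
//       cutlass::half_t, cutlass::layout::TensorNHWC,                  // ElementC, LayoutC (output)
//       float,                                                         // ElementAccumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,                        // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                          // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                           // instruction shape
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                             // stages
//       cutlass::arch::OpMultiplyAdd>::Kernel;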
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for FixedChannels IteratorAlgorithm and two stage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kFixedChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for FewChannels IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFewChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
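//
// Illustrative note: the kFewChannels variant is instantiated like the FixedChannels sketch
// shown earlier, substituting cutlass::conv::IteratorAlgorithm::kFewChannels and, typically, a
// reduced AlignmentA / AlignmentB (for example 2) so that small channel counts that are not
// multiples of the full 128-bit access can be handled. These values are illustrative
// assumptions only, not requirements taken from this header.
//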
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for FewChannels IteratorAlgorithm and two stage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kFewChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
// Note: the GEMM shared-memory threadmap is used here because the conv global-memory
// layout for fprop resembles the crosswise layout consumed by the interleaved GEMM
// shared-memory threadmap, while the interleaved GEMM global-memory layout resembles
// the congruous layout.
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
// Note: the GEMM shared-memory threadmap is used here because the conv global-memory
// layout for fprop resembles the crosswise layout consumed by the interleaved GEMM
// shared-memory threadmap, while the interleaved GEMM global-memory layout resembles
// the congruous layout.
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
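//
// Illustrative note: this specialization is selected when the operands use the interleaved
// layouts layout::TensorNCxHWx<InterleavedK> / layout::TensorCxRSKx<InterleavedK>, e.g. the
// int8 NC/32HW32 format with InterleavedK = 32. A usage sketch mirrors the earlier example,
// substituting int8_t operands, an int32_t accumulator, and those interleaved layouts; the
// concrete values are illustrative assumptions, not requirements taken from this header.
//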
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm
/// and 2 stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
// Note: the GEMM shared-memory threadmap is used here because the conv global-memory
// layout for fprop resembles the crosswise layout consumed by the interleaved GEMM
// shared-memory threadmap, while the interleaved GEMM global-memory layout resembles
// the congruous layout.
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
// Note: the GEMM shared-memory threadmap is used here because the conv global-memory
// layout for fprop resembles the crosswise layout consumed by the interleaved GEMM
// shared-memory threadmap, while the interleaved GEMM global-memory layout resembles
// the congruous layout.
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag
>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
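//
// Illustrative note: kOptimized is the default iterator algorithm of the primary template
// declared earlier in this header, so a minimal usage sketch is identical to the one shown
// after the FixedChannels specialization above, either omitting the iterator-algorithm
// argument or passing cutlass::conv::IteratorAlgorithm::kOptimized explicitly. The types in
// that sketch remain hypothetical placeholders.
//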
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true
>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm
/// and 2 stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
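//
// Illustrative note: this and the surrounding OpClassSimt specializations select the CUDA-core
// (FFMA) mainloop rather than Tensor Core MMA, e.g. for fp32 convolutions. A usage sketch
// follows the TensorOp example shown earlier, substituting cutlass::arch::OpClassSimt, a SIMT
// instruction shape such as cutlass::gemm::GemmShape<1, 1, 1>, and cutlass::arch::OpMultiplyAdd;
// these values are illustrative assumptions.
//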
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d_fprop.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d_fprop.h",
"repo_id": "include",
"token_count": 19809
} | 24 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Defines a Deconv3d kernel with a broadcast-capable epilogue, composed from an existing
DefaultDeconv3d (implicit GEMM) kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_deconv3d.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
>
struct DefaultDeconv3dWithBroadcast {
using ImplicitGemmBase = typename DefaultDeconv3d<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
StrideSupport
>::Kernel;
// Define epilogue
using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastTensorOp<
ArchTag,
typename ImplicitGemmBase::Epilogue::Shape,
typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
ImplicitGemmBase::Epilogue::kPartitionsK,
ElementC,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
ImplicitGemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
typename ImplicitGemmBase::Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv,
Conv3dProblemSize
>;
};
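//
// Minimal usage sketch (illustrative only): the composed kernel is normally wrapped by the
// device-level implicit-GEMM runner. Every name below is a hypothetical placeholder; in
// particular, "EpilogueWithBroadcastOp" stands for any epilogue functor that exposes the
// ElementT and ElementVector types required above.
//
//   using DeconvKernel = typename cutlass::conv::kernel::DefaultDeconv3dWithBroadcast<
//     ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator,
//     OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
//     EpilogueWithBroadcastOp, ThreadblockSwizzle, Stages, MathOperatorTag
//   >::Kernel;
//
//   using DeconvOp = cutlass::conv::device::ImplicitGemmConvolution<DeconvKernel>;
//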
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Deconv3d specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv3dWithBroadcast <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kUnity,
AlignmentA,
AlignmentB
> {
using ImplicitGemmBase = typename DefaultDeconv3d<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kUnity
>::Kernel;
// Define epilogue
using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastSimt<
ArchTag,
typename ImplicitGemmBase::Epilogue::Shape,
typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
ElementC,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
ImplicitGemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
typename ImplicitGemmBase::Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm,
int AlignmentA,
int AlignmentB
>
struct DefaultDeconv3dWithBroadcast <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kStrided,
AlignmentA,
AlignmentB
> {
using ImplicitGemmBase = typename DefaultDeconv3d<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm,
conv::StrideSupport::kStrided
>::Kernel;
// Define epilogue
using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastSimtStridedDgrad<
ArchTag,
typename ImplicitGemmBase::Epilogue::Shape,
typename ImplicitGemmBase::Epilogue::WarpMmaOperator,
ElementC,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
ImplicitGemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue<
typename ImplicitGemmBase::Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kDeconv,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_deconv3d_with_broadcast.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_deconv3d_with_broadcast.h",
"repo_id": "include",
"token_count": 2862
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
This header provides the analytic-algorithm activation iterator used by forward propagation (Fprop);
analogous iterators exist for backward data gradient (Dgrad) and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dFpropActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_z_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
offset_z_[s] = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
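    // Example (illustrative values): with Z = P = Q = 4, a linear offset offset_nzpq = 23
    // decomposes as n = 23 / 64 = 0, z = 23 / 16 = 1, p = (23 % 16) / 4 = 1, q = 23 % 4 = 3.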
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
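    // Traversal order: the filter position advances S-fastest (S, then R, then T); once the
    // full T*R*S extent has been visited, filter_c_ advances by one threadblock tile of
    // channels (Shape::kColumn), scaled by split_k_slices when split-K is enabled.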
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int z = offset_z_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int t = filter_t_;
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - filter_t_);
r = (problem_size_.R - 1 - filter_r_);
s = (problem_size_.S - 1 - filter_s_);
}
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, d, h, w, filter_c_);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.D &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
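// Illustrative example (parameter values assumed): with stride_d = 1, pad_d = 1 and
// dilation_d = 1, output index z = 0 and filter index t = 0 map to input d = 0*1 - 1 + 0*1 = -1,
// so valid() returns false for that access.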
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
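// (e.g. a 16-bit element type requires C to be a multiple of 128/16 = 8 channels,
//  a 32-bit element type a multiple of 4)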
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 3221
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting depthwise-related SIMT instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/warp/mma_depthwise_simt.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/conv/threadblock/depthwise_mma_base.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
namespace detail {
//
// Convert a WarpShapeM which is the whole tile of elements into the number of elements (2D) held by
// each partition within the warp.
// The goal is for each thread's tile of elements to be as square as
// possible for performance (4x4 will be faster than 2x8).
template<int WarpShapeM, // The number of elements (1D) contained in the entire warp
int WarpNumThreadsM> // The number of partitions within the warp
struct SimtWarpShape {
// kP * kQ * WarpNumThreadsM = WarpShapeM
// If needed, enable more specializations.
};
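// Illustrative example: the SimtWarpShape<16, 2> specialization below yields kP = 2 and kQ = 4,
// so each of the 2 partitions covers a 2x4 block of outputs and kP * kQ * WarpNumThreadsM = 16.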
template <>
struct SimtWarpShape<4, 4> {
static constexpr int kP = 1;
static constexpr int kQ = 1;
};
template <>
struct SimtWarpShape<4, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 1;
};
template <>
struct SimtWarpShape<4, 1> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<8, 1> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<8, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<8, 4> {
static constexpr int kP = 1;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<16, 1> {
static constexpr int kP = 4;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<16, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<16, 4> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <int WarpNumThreadsM>
struct SimtWarpShape<25, WarpNumThreadsM> {
static_assert(WarpNumThreadsM == 1, "WarpShapeM cannot be evenly split among the warp's threads");
static constexpr int kP = 5;
static constexpr int kQ = 5;
};
template <>
struct SimtWarpShape<32, 1> {
static constexpr int kP = 4;
static constexpr int kQ = 8;
};
template <>
struct SimtWarpShape<32, 2> {
static constexpr int kP = 4;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<32, 4> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
} // namespace detail
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of threadblock-scoped output tile
typename ThreadBlockOutputShape,
/// Filter shape per threadblock
typename FilterShape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DepthwiseMmaCoreWithLaneAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : cutlass::gemm::threadblock::DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeA_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeB_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseMmaCoreWithLaneAccessSize<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
kLaneAccessSizeB_,
2,
Operator_> : public cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_> {
using Base = cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_>;
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const kLaneAccessSizeA = kLaneAccessSizeA_;
static int const kLaneAccessSizeB = kLaneAccessSizeB_;
// Divisibility requirements
static_assert( kLaneAccessSizeA > 0 && kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger than ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = typename Base::WarpCount;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory are same as base class
//
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = cutlass::gemm::threadblock::detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = kLaneAccessSizeA / sizeof_bits<ElementA>::value;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
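// Illustrative example (element widths assumed): for 32-bit ElementA/ElementB with
// kLaneAccessSizeA = kLaneAccessSizeB = 128, numElementsA = numElementsB = 128/32 = 4,
// so LaneM = min(4, ThreadTileM) and LaneN = min(4, ThreadTileN).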
static int const kPaddingM = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// These should also be capped at the thread tile size
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::gemm::threadblock::MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const kLaneAccessSizeB = 128;
// Divisibility requirements
static_assert( kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger than ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
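// (128-bit global accesses: e.g. 8 elements per access for 16-bit types, 4 for 32-bit types)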
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, 1>, // Set kStrided = 1 because the activation shape is a runtime value.
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<1, Shape::kN>, // Set kRow to 1 because it is a runtime value
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
true // Dynamic iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
/// ThreadMap of iterator B used for writing to shared memory
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
// Groups per thread
// Fp32: 2 groups
// Fp16: 2 groups
// 1-byte types (e.g. int8): 4 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
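// Illustrative example (WarpShape assumed): for a 16-bit ElementB (GroupsPerThread = 2) and
// WarpShape::kN = 64, WarpNumThreadsN = min(64 / 2, 32) = 32 and WarpNumThreadsM = 32 / 32 = 1.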
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
// Get output P, Q per thread
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
// Fetch the channel with same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// These should also be capped at the thread tile size
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape_, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using ActivationShape = ActivationShape_;
static int const kLaneAccessSizeB = 128;
// Divisibility requirements
static_assert( kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger than ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<ActivationShape::kC, ActivationShape::kNHW>,
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<ActivationShape::kNHW, ActivationShape::kC>,
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
false // static iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
/// ThreadMap of iterator B used for writing to shared memory
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
// Groups per thread
// Fp32: 2 groups
// Fp16: 2 groups
// 1-byte types (e.g. int8): 4 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
// Fetch the channel with same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// These should also be capped at the thread tile size
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
IteratorAlgorithm::kFixedStrideDilation, /// Iterator algo type
StrideShape, /// Stride ( MatrixShape<Height, Width> )
DilationShape, /// Dilation ( MatrixShape<Height, Width> )
ActivationShape /// Activation Shape loaded by threadblock
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h/0 | {
"file_path": "include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h",
"repo_id": "include",
"token_count": 14729
} | 27 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/epilogue/fusion/operations.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dispatch interface for epilogue fusion callbacks
// For visitor fusions, this is just a convenience wrapper to provide metadata and non-nested args.
// It is also valid to just pass visitor callbacks directly to the collective, e.g. fusion::Sm90LinearCombination,
// provided the collective supports a visitor callbacks interface. This is useful for implementing custom fusions.
template <
class DispatchPolicy, // specialize on collective's dispatch policy since callbacks API will depend on collective's algorithm
class Operation, // the fusion operation being performed, e.g. fusion::LinearCombination
class CtaTile_MNK, // computed tile per CTA
class EpilogueTile_MN, // epilogue subtile size
class... Args // callbacks implementation dependent args (e.g. copy atoms, smem layouts)
>
struct FusionCallbacks {
static_assert(cutlass::detail::dependent_false<DispatchPolicy, Operation>, "Could not find a callbacks specialization.");
};
// Metadata helper to handle custom EVTs or other non-FusionCallbacks types
template <class T>
struct FusionCallbacksTraits {
using DispatchPolicy = void;
using Operation = T;
using CtaTile_MNK = void;
using EpilogueTile_MN = void;
using ElementCompute = void;
};
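// Note: this primary template covers custom EVTs and any other non-FusionCallbacks type:
// the type itself is reported as the Operation, and the dispatch-policy/tile metadata are void.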
template <
class DispatchPolicy_,
class Operation_,
class CtaTile_MNK_,
class EpilogueTile_MN_,
class... Args
>
struct FusionCallbacksTraits<
FusionCallbacks<DispatchPolicy_, Operation_, CtaTile_MNK_, EpilogueTile_MN_, Args...>
> {
using DispatchPolicy = DispatchPolicy_;
using Operation = Operation_;
using CtaTile_MNK = CtaTile_MNK_;
using EpilogueTile_MN = EpilogueTile_MN_;
using ElementCompute = typename Operation::ElementCompute;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/callbacks.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/callbacks.hpp",
"repo_id": "include",
"token_count": 1049
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped complex GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for the complex*complex case
// 4 real-valued mma operations (Complex)
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Epilogue Shape
typename Shape_,
/// Warp-level mma operator
typename WarpMmaTensorOp_,
/// Number of k partitions
int PartitionsK,
/// Epilogue output operator
typename OutputOp_,
/// Elements accessed by inner-most loop of AccumulatorFragmentIterator::load()
int ElementsPerAccess,
/// Multiply-add operator
/// Selects between (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_ = arch::OpMultiplyAddComplex
>
struct DefaultEpilogueComplexTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using Operator = Operator_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
ElementAccumulator,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 0>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization defining sensible defaults for epilogues for the complex*complex case
// 3 real-valued mma operations (Gaussian Complex)
// A = (ar + j ai), B = (br +j bi), D = AB
// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi)
// D = dr + j di = (P1 - P3) + j (P1 + P2)
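// Expanding the products confirms the mapping:
// P1 - P3 = (ar*br + ai*br) - (ai*br + ai*bi) = ar*br - ai*bi = dr
// P1 + P2 = (ar*br + ai*br) + (-ar*br + ar*bi) = ai*br + ar*bi = di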
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueComplexTensorOp <Shape_, WarpMmaTensorOp_, PartitionsK,
OutputOp_, ElementsPerAccess,
arch::OpMultiplyAddGaussianComplex
> {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using Operator = arch::OpMultiplyAddGaussianComplex;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorGaussianComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
ElementAccumulator,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 0>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h",
"repo_id": "include",
"token_count": 2868
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Element_>
class DirectStoreEpilogueIterator {
public:
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = 1;
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout) {
stride = layout.stride(0) * sizeof(Element);
}
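// Note: the stride is stored here in bytes (layout stride * sizeof(Element)); the iterator
// constructor below converts it back to units of elements via params.stride / sizeof(Element).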
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
public:
//
// Data members
//
Element *pointer; // pointer to the output matrix
LongIndex stride; // stride in elements between rows
TensorCoord extent; // extent of output matrix
int thread_idx; // thread index
TensorCoord threadblock_offset;
public:
/// Constructor
CUTLASS_DEVICE
DirectStoreEpilogueIterator(
PredicatedTileIteratorParams const & params,
Element *pointer_,
TensorCoord extent_,
int thread_idx_,
TensorCoord threadblock_offset_ = TensorCoord(),
int const * indices = nullptr
):
pointer(pointer_),
stride(params.stride / sizeof(Element)),
extent(extent_),
thread_idx(thread_idx_),
threadblock_offset(threadblock_offset_)
{
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h",
"repo_id": "include",
"token_count": 1332
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs.
This does not attempt to target any particular output layout. Instead, each threadblock
streams out its accumulator elements using 128b store operations. This assumes all threadblocks
have unique output tiles.
The target data layout is:
- threadblock indices mapped to linear offsets as (m, n, k), where m is fastest-changing
- threadblock output space partitioned into warps; each warp's region is contiguous
- per-thread accumulators partitioned into 128b accesses
- output memory striped across the threads of a warp
This enables very fast streaming of data, completely limited by the memory system. No predication
or data exchange is performed, and each threadblock is assumed to have a full region of memory
to write to.
This epilogue establishes an upper bound for epilogue performance and is suitable for
reductions across the GEMM K dimension which require a separate workspace.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of accumulator tile (concept: MatrixShape)
int WarpCount, ///< number of warps
typename FragmentC_ ///< warp-level GEMM operator (concept: gemm::warp::Mma)
>
class EpilogueWorkspace {
public:
using Shape = Shape_;
using FragmentC = FragmentC_;
using ElementC = typename FragmentC::value_type;
static int const kWarpCount = WarpCount;
/// Optimize for 128b accesses
static int const kAccessSizeInBits = 128;
/// Warp size from the perspective of memory operations
static int const kWarpSize = 32;
/// Vector length of accesses
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementC>::value;
/// Number of stores per thread
static int const kIterations = FragmentC::kElements / kElementsPerAccess;
static_assert(
!(FragmentC::kElements % kElementsPerAccess),
"The number of accumulators must be divisible by the access size.");
/// Total number of vectorized accesses in warp (in units of vector)
static int const kWarpAccesses = kIterations * kWarpSize;
/// Total number of vectorized accesses in threadblock tile (in units of vector)
static int const kThreadblockAccesses = kWarpAccesses * kWarpCount;
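// Illustrative example (fragment size and warp count assumed): for ElementC = float,
// kElementsPerAccess = 128 / 32 = 4; a FragmentC of 64 accumulators gives kIterations = 16,
// kWarpAccesses = 16 * 32 = 512, and with kWarpCount = 4, kThreadblockAccesses = 2048.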
/// Parameters structure
struct Params {
/// Pointer to C matrix
ElementC *ptr_C;
/// Stride between tiles along the GEMM N dimension (in units of vectors)
int stride_n;
/// Stride between tiles along the GEMM K dimension (in units of vectors)
int stride_k;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(
ElementC *ptr_C, ///< Pointer to C matrix
int stride_n_, ///< Stride between tiles along the GEMM N dimension (in units of ElementC)
int stride_k_ ///< Stride between tiles along the GEMM K dimension (in units of ElementC)
):
ptr_C(ptr_C), stride_n(stride_n_ / kElementsPerAccess), stride_k(stride_k_ / kElementsPerAccess) {
}
};
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
// Intentionally empty
};
private:
struct alignas((kAccessSizeInBits / 8)) AccessType {
Array<ElementC, kElementsPerAccess> storage;
};
/// Constant reference to parameters object
AccessType *pointer_;
/// Stride between tiles along the n dimension (in vectors)
int stride_n_;
/// Stride between tiles along the k dimension (in vectors)
int stride_k_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWorkspace(
Params const &params, ///< Host-constructable params object
SharedStorage &, ///< Shared storage object
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< ID of thread within warp
):
pointer_(reinterpret_cast<AccessType *>(params.ptr_C)),
stride_n_(params.stride_n),
stride_k_(params.stride_k) {
// Add per-thread offset
pointer_ += lane_idx + warp_idx * kWarpAccesses;
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
cutlass::gemm::GemmCoord problem_size, ///< Problem size of GEMM (units of ElementC)
cutlass::gemm::GemmCoord tb_tile_coord, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
FragmentC const &accum) { ///< Accumulator tile
// Compute offset for entire threadblock (note, per-thread offset has been folded in already)
AccessType *pointer = pointer_ +
tb_tile_coord.m() * kThreadblockAccesses +
tb_tile_coord.n() * stride_n_ +
tb_tile_coord.k() * stride_k_;
// Cast to vectorized view of accumulator fragments
AccessType const * src_pointer = reinterpret_cast<AccessType const *>(&accum);
// Write out accumulators at full speed
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations; ++i) {
pointer[i * kWarpSize] = src_pointer[i];
}
}
};
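// The sketch below is illustrative only and is not part of the library: it shows how a kernel
// might instantiate and invoke EpilogueWorkspace. The fragment type, warp count, pointers,
// strides, and coordinates are placeholder assumptions.
//
//   using Shape     = cutlass::MatrixShape<128, 128>;
//   using FragmentC = cutlass::Array<float, 64>;                // per-thread accumulators (assumed)
//   using Epilogue  = cutlass::epilogue::EpilogueWorkspace<Shape, /*WarpCount=*/8, FragmentC>;
//
//   __shared__ Epilogue::SharedStorage storage;
//
//   Epilogue::Params params(ptr_workspace, stride_n, stride_k);  // strides in units of ElementC
//   Epilogue epilogue(params, storage, warp_idx, lane_idx);
//
//   epilogue(problem_size, tb_tile_coord, accumulators);         // stream accumulators to gmem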
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_workspace.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_workspace.h",
"repo_id": "include",
"token_count": 2162
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief PredicatedTileIteratorPredicates.
PredicatedTileIteratorPredicates enables both upper and lower bounds for predicates.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator predicates used to bound computations in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Element data type
>
class PredicatedTileIteratorPredicates {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Lower extent of the matrix tile in rows
Index lower_extent_row_;
/// Upper extent of the matrix tile in rows
Index upper_extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(lower_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(upper_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorPredicates(
PredicatedTileIteratorParams const & params,
TensorCoord lower_extent,
TensorCoord upper_extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
lower_extent_row_ = lower_extent.row();
upper_extent_row_ = upper_extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < upper_extent.column()) &&
((thread_offset.column() + ThreadMap::Delta::kColumn * c) >= lower_extent.column());
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorPredicates &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
///< Gets lower_extent_row_
CUTLASS_DEVICE Index get_lower_extent_row() {
return lower_extent_row_;
}
///< Gets upper_extent_row_
CUTLASS_DEVICE Index get_upper_extent_row() {
return upper_extent_row_;
}
///< Gets thread_start_row_
CUTLASS_DEVICE Index get_thread_start_row() {
return thread_start_row_;
}
};
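// Illustrative sketch (not part of the library) of constructing these predicates inside a
// device-side epilogue. The thread map type, extents, thread index, and offsets below are
// placeholder assumptions.
//
//   using Iterator = PredicatedTileIteratorPredicates<OutputTileThreadMap, float>;
//
//   Iterator::Params params(layout);                    // layout: layout::RowMajor of the output
//   Iterator iterator(params,
//                     {lower_row, lower_column},         // lower extent
//                     {upper_row, upper_column},         // upper extent
//                     threadIdx.x,
//                     threadblock_offset);
//
//   Iterator::Mask mask;
//   iterator.get_mask(mask);                             // per-thread column predicates
//   ++iterator;                                          // advance to the next row group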
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h",
"repo_id": "include",
"token_count": 3058
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Warp-level tile iterators for reading and writing Volta Tensor Op accumulator tiles to shared memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
#include "cutlass/epilogue/warp/volta_tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape)
typename ElementC, ///< Accumulator element data type
typename Layout ///< target shared memory layout
>
struct TileIteratorVoltaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using Element = half_t;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// Array type for aligned memory accesses
using AccessType = typename Policy::AccessType;
/// This is the fragment size produced by one access of the iterator.
using Fragment = typename Policy::Fragment;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Policy::AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of elements per access
static int const kElementsPerAccess = Policy::kElementsPerAccess;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
static int const kRowsPerQuad = 4;
static int const kColumnsPerQuad = 8;
static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
static int const kAccessQuadDelta = 16;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorVoltaTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = lane_id / Detail::kLanesInQuad;
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
int quad_row_idx = ((quad_id & 4) >> 1) + (quad_id & 1);
int quad_col_idx = ((quad_id & 2) >> 1);
int row = quad_row_idx * Detail::kRowsPerQuad + lane_in_quad;
int column = quad_col_idx * Detail::kColumnsPerQuad;
pointer_ += layout_({row, column / kElementsPerAccess});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {
int access_quad = access_idx / 2;
int access = access_idx % 2;
int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
access + pointer_offset / Policy::kElementsPerAccess;
int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;
AccessType access_vector = frag_ptr[frag_idx];
pointer_[ptr_offset] = access_vector;
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {
int access_quad = access_idx / 2;
int access = access_idx % 2;
int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
  access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
  access + pointer_offset / Policy::kElementsPerAccess;
int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;
frag_ptr[frag_idx] = pointer_[ptr_offset];
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
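// Illustrative (non-normative) use of the specialization above from a warp-scoped epilogue.
// The warp shape, shared-memory pointer, leading dimension, and lane index are assumptions
// made for this sketch only.
//
//   using WarpShape = gemm::GemmShape<64, 64, 4>;
//   using Iterator  = TileIteratorVoltaTensorOp<WarpShape, gemm::GemmShape<32, 32, 4>,
//                                               half_t, layout::RowMajor>;
//
//   Iterator::TensorRef ref(smem_ptr, layout::RowMajor(ldm));
//   Iterator iterator(ref, lane_id);
//
//   Iterator::Fragment frag;                  // filled from the warp-level MMA accumulators
//   iterator.store(frag);                     // write one interleaved tile to shared memory
//   iterator.add_tile_offset({1, 0});         // advance one tile in the row dimension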
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using Element = float;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// Array type for aligned memory accesses
using AccessType = typename Policy::AccessType;
/// This is the fragment size produced by one access of the iterator.
using Fragment = typename Policy::Fragment;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Policy::AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of elements per access
static int const kElementsPerAccess = Policy::kElementsPerAccess;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
static int const kRowsPerQuad = 4;
static int const kColumnsPerQuad = 8;
static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
static int const kAccessQuadDelta = 16;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorVoltaTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = lane_id / Detail::kLanesInQuad;
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
int const kQuadRowDelta = 4;
int const kQuadColumnDelta = 2 * Policy::MmaIterations::kColumn;
int quad_row_offset = ((quad_id & 4) / 2 + (quad_id & 1)) * kQuadRowDelta;
int quad_column_offset = (quad_id & 2) / 2 * kQuadColumnDelta;
int thread_row_offset = (lane_in_quad & 1);
int thread_column_offset = (lane_in_quad & 2) / 2;
int row = quad_row_offset + thread_row_offset;
int column = quad_column_offset + thread_column_offset;
pointer_ += layout_({row, column});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
int const kAccessesPerRow = Policy::TileIterations::kColumn * Policy::MmaIterations::kColumn * 2;
CUTLASS_PRAGMA_UNROLL
for (int row_idx = 0; row_idx < Policy::kRowsPerMmaTile; ++row_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < kAccessesPerRow; ++access_idx) {
int frag_idx = row_idx * kAccessesPerRow + access_idx;
int ptr_column_offset = (access_idx & 1) * 2 +
(access_idx & 2) * Policy::MmaIterations::kColumn * 2 +
(access_idx & 4) * Policy::MmaIterations::kColumn * 2;
int ptr_row_offset = row_idx * 2;
int ptr_offset = layout_({ptr_row_offset, ptr_column_offset}) + pointer_offset / Policy::kElementsPerAccess;
pointer_[ptr_offset] = frag_ptr[frag_idx];
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
  // Loading is not implemented for this specialization.
  CUTLASS_UNUSED(frag);
  CUTLASS_UNUSED(pointer_offset);
  assert(0);
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h",
"repo_id": "include",
"token_count": 4836
} | 33 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/transform/collective/sm90_wgmma_transpose.hpp"
#include "cutlass/trace.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cute/arch/copy_sm90.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
// WarpSpecialized Mainloop
template <
int Stages,
class ClusterShape_,
class TileShape_,
class KernelSchedule,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm90CpAsyncGmmaRmemAWarpSpecialized<Stages,ClusterShape_,KernelSchedule>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm90CpAsyncGmmaRmemAWarpSpecialized<Stages,ClusterShape_,KernelSchedule>;
using TileShape = TileShape_;
using ClusterShape = ClusterShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{}));
// Swap and transpose A/B for A k-major layout and B mn-major layout since WGMMA is k-major only (e.g. tf32, Fp32, Int8, Fp8 WGMMA)
static constexpr bool IsLayoutAkBmn =
cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::RowMajor> &&
cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>;
static constexpr bool IsInputSizeTwoBytes = sizeof(ElementA) == 2 && sizeof(ElementB) == 2;
static constexpr bool SwapAB = !IsInputSizeTwoBytes && IsLayoutAkBmn;
using InternalGmemTiledCopyA = cute::conditional_t<!SwapAB, GmemTiledCopyA, GmemTiledCopyB>;
using InternalGmemTiledCopyB = cute::conditional_t<!SwapAB, GmemTiledCopyB, GmemTiledCopyA>;
using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>;
using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>;
using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>;
using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>;
// TMA converts f32 input to tf32 when copying from GMEM to SMEM
// For all other types, cast to size equivalent uint type to avoid any rounding by TMA.
static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>;
static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>;
using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>;
using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>;
using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>;
using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>;
using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>;
using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
using MainloopPipeline = cutlass::PipelineAsync<DispatchPolicy::Stages>;
using PipelineState = typename MainloopPipeline::PipelineState;
using PipelineParams = typename MainloopPipeline::Params;
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
InternalSmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
InternalSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
// If A mn-layout and B mn-layout, transposing B matrix since WGMMA is k-major only (e.g. tf32, fp32, fp8, int8).
static constexpr bool IsLayoutAmnBmn =
cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::ColumnMajor> &&
cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>;
static constexpr bool TransposeB = !IsInputSizeTwoBytes && IsLayoutAmnBmn;
using TransposeOperandB = decltype(cutlass::transform::collective::detail::make_transpose_operand_b(
0, 0, TiledMma{}, SmemLayoutB{}, InternalSmemLayoutAtomB{},
InternalElementB{}, cute::bool_constant<TransposeB>{}));
static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more.");
static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value &&
cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value,
"MMA atom must source A from rmem and B operand from smem_desc for this mainloop.");
using GmmaSmemLayoutAtomB = decltype(transform::collective::detail::gmma_smem_transpose_or_passthrough<
TransposeB, InternalSmemLayoutAtomB, InternalElementB>());
// SmemLayoutB for GMMA is different from SmemLayoutB for TMA if TransposeB
using GmmaSmemLayoutB = decltype(tile_to_shape(
GmmaSmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(!SwapAB || !TransposeB, "Cannot SwapAB and TransposeB at the same time.");
static_assert(TransposeB xor (cute::is_same_v<SmemLayoutB, GmmaSmemLayoutB>),
"Should be same layout if not TransposeB.");
static_assert(!TransposeB || (cutlass::bits_to_bytes(size<1>(SmemLayoutB{}) * sizeof_bits<InternalElementB>::value)) == 128,
"SmemLayoutB K must be 128bytes to be transposed.");
static_assert(!transform::collective::detail::use_universal_transposition<InternalSmemLayoutAtomB, InternalElementB>(),
"Warp specialized ARF kernels have not supported universal B transposition yet.");
struct SharedStorage
{
struct TensorStorage : cute::aligned_struct<256> {
cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>, 256> smem_A;
cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>, 256> smem_B;
} tensors;
using PipelineStorage = typename MainloopPipeline::SharedStorage;
PipelineStorage pipeline;
};
using TensorStorage = typename SharedStorage::TensorStorage;
using PipelineStorage = typename SharedStorage::PipelineStorage;
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A = nullptr;
StrideA dA{};
ElementB const* ptr_B = nullptr;
StrideB dB{};
uint32_t mma_promotion_interval = 4;
};
// Device side kernel params
struct Params {
InternalElementA const* ptr_A = nullptr;
InternalStrideA dA{};
InternalElementB const* ptr_B = nullptr;
InternalStrideB dB{};
uint32_t mma_promotion_interval = 4;
};
//
// Methods
//
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(
[[maybe_unused]] ProblemShape const& problem_shape,
Arguments const& args,
[[maybe_unused]] void* workspace) {
if constexpr (not SwapAB) {
return {
reinterpret_cast<InternalElementA const*>(args.ptr_A),
args.dA,
reinterpret_cast<InternalElementB const*>(args.ptr_B),
args.dB
};
}
else {
return {
reinterpret_cast<InternalElementA const*>(args.ptr_B),
args.dB,
reinterpret_cast<InternalElementB const*>(args.ptr_A),
args.dA
};
}
}
template<class ProblemShape>
CUTLASS_HOST_DEVICE static bool
can_implement(
ProblemShape const& problem_shape,
[[maybe_unused]] Arguments const& args) {
auto problem_shape_MNKL = append<4>(problem_shape, 1);
auto [M,N,K,L] = problem_shape_MNKL;
bool implementable = true;
implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyA::NumValSrc>(cute::make_shape(M,K,L), StrideA{});
implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyB::NumValSrc>(cute::make_shape(N,K,L), StrideB{});
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n");
}
return implementable;
}
static constexpr int K_PIPE_MAX = DispatchPolicy::Stages;
static constexpr int K_PIPE_MMAS = 1;
/// Perform a collective-scoped matrix multiply-accumulate
/// Producer Perspective
template <
class TensorA,
class TensorB,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
load(
MainloopPipeline pipeline,
PipelineState smem_pipe_write,
TensorA const& gA_in,
TensorB const& gB_in,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
TensorStorage& shared_tensors)
{
using namespace cute;
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
Tensor gA = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gA_in);
Tensor gB = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gB_in);
// Partition the copying of A and B tiles across the threads
InternalGmemTiledCopyA gmem_tiled_copy_a;
InternalGmemTiledCopyB gmem_tiled_copy_b;
auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx);
auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
// 0-th stage with predication on k to account for residue
{
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
int write_stage = smem_pipe_write.index();
// Copy gmem to smem for *k_tile_iter, predicating for k residue
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tAsA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,write_stage));
}
else {
clear(tAsA(_,_,k,write_stage));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBsB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,write_stage));
}
else {
clear(tBsB(_,_,k,write_stage));
}
}
++k_tile_iter;
--k_tile_count;
// UNLOCK smem_pipe_write
pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive);
// Advance smem_pipe_write
++smem_pipe_write;
}
// Mainloop
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 0; --k_tile_count) {
// LOCK smem_pipe_write for _writing_
pipeline.producer_acquire(smem_pipe_write);
int write_stage = smem_pipe_write.index();
// Copy gmem to smem for *k_tile_iter
copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage));
copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage));
++k_tile_iter;
// UNLOCK smem_pipe_write
pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive);
// Advance smem_pipe_write
++smem_pipe_write;
}
}
/// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster
CUTLASS_DEVICE void
load_tail(
MainloopPipeline pipeline,
PipelineState smem_pipe_write) {
// Issue the epilogue waits
/* This helps avoid early exit of blocks in Cluster
* Waits for all stages to either be released (all
* Consumer UNLOCKs), or if the stage was never used
* then would just be acquired since the phase was
* still inverted from make_producer_start_state
*/
pipeline.producer_tail(smem_pipe_write);
}
/// Perform a collective-scoped matrix multiply-accumulate
/// Consumer Perspective
template <
class FrgTensorC
>
CUTLASS_DEVICE void
mma(MainloopPipeline pipeline,
PipelineState smem_pipe_read,
FrgTensorC& accum,
int k_tile_count,
int thread_idx,
TensorStorage& shared_tensors,
Params const& mainloop_params)
{
using namespace cute;
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be rank 2.");
static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2.");
static_assert(!cute::is_void_v<InternalSmemCopyAtomA>,
"SM90 GMMA mainloops must specify a non-void copy atom for smem sourced instructions.");
static_assert(cute::is_void_v<InternalSmemCopyAtomB>,
"SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions.");
// Obtain warp index
int warp_idx = canonical_warp_idx_sync();
[[maybe_unused]] int warp_group_thread_idx = thread_idx % 128;
Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE)
Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE)
// If TransposeB, GMMA will read from transposed B layout SMEM
Tensor gmma_sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), GmmaSmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Define C accumulators and A/B partitioning
//
TiledMma tiled_mma;
auto thread_mma = tiled_mma.get_thread_slice(thread_idx);
// Allocate fragments and descriptors
Tensor tCsA = thread_mma.partition_A(sA);
Tensor tCrA = thread_mma.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thread_mma.partition_B(gmma_sB); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
//
// Copy Atom A retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K
CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
//
// PIPELINED MAIN LOOP
//
static_assert((0 <= K_PIPE_MMAS) && (K_PIPE_MMAS < K_PIPE_MAX),
"ERROR : Incorrect number of MMAs in flight");
// We release buffers to producer warps(dma load) with some mmas in flight
PipelineState smem_pipe_release = smem_pipe_read;
tiled_mma.accumulate_ = GMMA::ScaleOut::Zero;
TransposeOperandB transpose = cutlass::transform::collective::detail::make_transpose_operand_b(
warp_idx, warp_group_thread_idx, tiled_mma, SmemLayoutB{},
InternalSmemLayoutAtomB{}, InternalElementB{},
cute::bool_constant<TransposeB>{});
warpgroup_fence_operand(accum);
// first k tile
{
pipeline.consumer_wait(smem_pipe_read);
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
bool skip_wait = (pipeline.consumer_try_wait(smem_pipe_read) == BarrierStatus::WaitDone);
// copy smem->rmem for A operand
copy(smem_tiled_copy_A, tCsA(_,_,0,read_stage), tCrA_copy_view(_,_,0));
// transpose B operand in SMEM
transpose(sB, gmma_sB, read_stage, 0);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) {
copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
if (k_block == 0) {
transpose(sB, gmma_sB, read_stage, 1);
transpose.synchronize();
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
}
warpgroup_wait<2>();
if (k_tile_count - 1 > 0) {
if (!skip_wait) {
pipeline.consumer_wait(smem_pipe_read);
}
copy(smem_tiled_copy_A, tCsA(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0));
transpose(sB, gmma_sB, smem_pipe_read.index(), 0);
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<2>();
}
warpgroup_fence_operand(accum);
// Mainloop GMMAs
--k_tile_count;
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > 1; --k_tile_count) {
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
++smem_pipe_read;
bool skip_wait = (pipeline.consumer_try_wait(smem_pipe_read) == BarrierStatus::WaitDone);
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) {
if (k_block == size<2>(tCrA) - 1) {
if (!skip_wait) {
pipeline.consumer_wait(smem_pipe_read);
}
copy(smem_tiled_copy_A, tCsA(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0));
// transpose B operand in SMEM
transpose(sB, gmma_sB, smem_pipe_read.index(), 0);
} else {
copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
// transpose B operand in SMEM
if (k_block < 2) {
transpose.synchronize(k_block); // make transpose of k_block available
}
if (k_block == 0) {
transpose(sB, gmma_sB, read_stage, 1);
}
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<2>();
if (k_block == 1) {
// release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
warpgroup_fence_operand(accum);
}
warpgroup_fence_operand(accum);
if (k_tile_count > 0) {
//
// Compute on k_tile
//
int read_stage = smem_pipe_read.index();
warpgroup_fence_operand(accum);
// Unroll the K mode manually to set scale D to 1
CUTLASS_PRAGMA_UNROLL
for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) {
copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1));
if (k_block < 2) {
transpose.synchronize(k_block); // make k_block transpose available
}
if (k_block == 0) {
transpose(sB, gmma_sB, read_stage, 1);
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<2>();
if (k_block == 1) {
// release prior barrier
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
warpgroup_arrive();
// (V,M) x (V,N) => (V,M,N)
cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum);
tiled_mma.accumulate_ = GMMA::ScaleOut::One;
warpgroup_commit_batch();
warpgroup_wait<2>();
warpgroup_fence_operand(accum);
}
warpgroup_fence_operand(accum);
}
/// Perform a Consumer Epilogue to release all buffers
CUTLASS_DEVICE void
mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) {
// Prologue GMMAs
int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count);
k_tile_count -= prologue_mma_count;
smem_pipe_release.advance(k_tile_count);
// Wait on all GMMAs to complete
warpgroup_wait<0>();
for (int count = 0; count < prologue_mma_count; ++count) {
pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it
++smem_pipe_release;
}
}
};
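// Collectives of this kind are normally composed through the collective builder rather than
// instantiated by hand. The sketch below is a hedged illustration only: the element types, tile
// and cluster shapes, alignments, and schedule tags are placeholders, and the exact builder
// arguments (and which mainloop specialization they select) depend on the CUTLASS version and target.
//
//   using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
//       cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
//       float, cutlass::layout::RowMajor,    4,          // A: element, layout, alignment
//       float, cutlass::layout::ColumnMajor, 4,          // B: element, layout, alignment
//       float,                                           // accumulator
//       cute::Shape<cute::_128, cute::_128, cute::_32>,  // threadblock tile (M, N, K)
//       cute::Shape<cute::_1, cute::_1, cute::_1>,       // cluster shape
//       cutlass::gemm::collective::StageCountAuto,
//       cutlass::gemm::collective::KernelScheduleAuto
//     >::CollectiveOp;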
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp",
"repo_id": "include",
"token_count": 12653
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Device-level interface to pipelined structured-sparse GEMM kernels. Batching is not supported; split-K is available only through serial reduction (kSplitKSerial).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to GEMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible GEMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
are selected to tradeoff simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's SGEMM NN
is as follows:
//
// Instantiate the CUTLASS GEMM operator.
//
cutlass::gemm::device::Gemm<
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor
> gemm_op;
//
// Launch the GEMM operation on the device
//
cutlass::Status status = gemm_op({
{m, n, k}, // GemmCoord problem_size,
{A, lda}, // TensorRef<float, layout::ColumnMajor> ref_A,
{B, ldb}, // TensorRef<float, layout::ColumnMajor> ref_B,
{C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C,
{D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D,
{alpha, beta} // EpilogueOutputOp::Params epilogue_op_params
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages
>
class Gemm;
*/
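//
// The sparse specialization defined below follows the same pattern. The sketch that follows is
// illustrative only: the element types, extents, leading dimensions, and pointer names are
// assumptions, and ref_E refers to the reordered 2:4 sparsity metadata produced separately.
//
//   cutlass::gemm::device::SparseGemm<
//     cutlass::half_t, cutlass::layout::RowMajor,     // A (compressed along K)
//     cutlass::half_t, cutlass::layout::ColumnMajor,  // B
//     cutlass::half_t, cutlass::layout::RowMajor,     // C and D
//     float,
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80
//   > gemm_op;
//
//   cutlass::Status status = gemm_op({
//     {m, n, k},                        // logical problem size
//     {A, lda},                         // A holds k / kSparse columns per row
//     {B, ldb},
//     {C, ldc},
//     {D, ldd},
//     {E, lde},                         // reordered metadata tensor
//     {alpha, beta}
//   });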
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class SparseGemm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::GemmKernel;
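  // Types describing the structured-sparse A operand: A is stored in compressed form (kSparse
  // logical elements of the dense A per stored element, e.g. 2 for 2:4 structured sparsity),
  // accompanied by a metadata tensor E that records which values were retained.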
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
TensorRef<ElementE const, LayoutE> ref_E;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
TensorRef<ElementE, LayoutE> ref_E_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ref_E(ref_E_),
epilogue(epilogue_),
split_k_slices(split_k_slices) {
}
};
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemm() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref(),
args.epilogue,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_C.reset(args.ref_C.non_const_ref().data());
params_.ref_D.reset(args.ref_D.data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
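
/*
  Example usage (illustrative sketch; the element types, layouts, and tile shapes below are one
  plausible SM80 configuration modeled after the CUTLASS sparse tensor-op GEMM examples, and
  m, n, k, the TensorRefs, alpha, and beta are placeholders supplied by the caller):

    using SparseGemmOp = cutlass::gemm::device::SparseGemm<
      cutlass::half_t, cutlass::layout::RowMajor,       // A (stored compressed)
      cutlass::half_t, cutlass::layout::ColumnMajor,    // B
      cutlass::half_t, cutlass::layout::RowMajor,       // C and D
      float,                                            // accumulator
      cutlass::arch::OpClassTensorOp,
      cutlass::arch::Sm80,
      cutlass::gemm::GemmShape<128, 128, 64>,           // threadblock tile
      cutlass::gemm::GemmShape<64, 64, 64>,             // warp tile
      cutlass::gemm::GemmShape<16, 8, 32>>;             // instruction tile

    SparseGemmOp gemm_op;

    cutlass::Status status = gemm_op({
      {m, n, k},
      ref_A,        // compressed A
      ref_B,
      ref_C,
      ref_D,
      ref_E,        // reordered sparsity metadata
      {alpha, beta}
    });
*/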
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_sparse.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_sparse.h",
"repo_id": "include",
"token_count": 6306
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a TRMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/kernel/default_trmm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Trmm device-level operator. This is an interface to efficient CUTLASS TRMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to TRMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible TRMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
are selected to tradeoff simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS TRMM operator implementing the functionality of cuBLAS's STRMM NN
is as follows:
//
// Instantiate the CUTLASS TRMM operator.
//
cutlass::gemm::device::Trmm<
float,
cutlass::layout::ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor,
> trmm_op;
//
// Launch the TRMM operation on the device
//
cutlass::Status status = trmm_op({
      cutlass::gemm::GemmUniversalMode::kGemm,  // TRMM problem mode
      {m, n, m},                                // GemmCoord problem_size (k equals m for left-side mode, or n for right-side mode)
batch_count,
{alpha}, // EpilogueOutputOp::Params epilogue_op_params
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int lda,
int ldb,
int ldc
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// DiagType for A (kNonUnit or kUnit)
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA
>
class Trmm;
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A
SideMode SideModeA,
/// Fill Mode for A
FillMode FillModeA,
/// DiagType for A
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = epilogue::thread::LinearCombination<
ElementC_,
128 / sizeof_bits<ElementC_>::value,
ElementAccumulator_,
ElementAccumulator_,
epilogue::thread::ScaleType::OnlyAlphaScaling
>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by TRMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA = ComplexTransform::kNone>
class Trmm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
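  // When A is on the right-hand side, the kernel's A and B operands are exchanged so that the
  // triangular operand always occupies the same slot in the mainloop; the *Kernel aliases below
  // select the post-exchange element types, layouts, alignments, and transforms.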
using ElementAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementB_, ElementA_>::type;
using LayoutAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutB_, LayoutA_>::type;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementA_, ElementB_>::type;
using LayoutBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutA_, LayoutB_>::type;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static SideMode const kSideMode = SideModeA;
static FillMode const kFillMode = FillModeA;
static DiagType const kDiagType = DiagTypeA;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentAKernel = (SideModeA == SideMode::kRight) ? AlignmentB : AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentBKernel = (SideModeA == SideMode::kRight) ? AlignmentA : AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
  // Complex transform does not apply to B
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static ComplexTransform const kTransformAKernel = (SideModeA == SideMode::kRight) ?
ComplexTransform::kNone : TransformA;
static ComplexTransform const kTransformBKernel = (SideModeA == SideMode::kRight) ?
TransformA : ComplexTransform::kNone;
/// Define the kernel
using TrmmKernel = typename kernel::DefaultTrmmUniversal<
ElementAKernel,
LayoutAKernel,
kTransformAKernel,
kAlignmentAKernel,
ElementBKernel,
LayoutBKernel,
kTransformBKernel,
kAlignmentBKernel,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::TrmmKernel;
using Arguments = typename TrmmKernel::Arguments;
private:
/// Kernel parameters object
typename TrmmKernel::Params params_;
public:
/// Constructs the TRMM.
Trmm() { }
/// Determines whether the TRMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = TrmmKernel::can_implement(args);
if (SideModeA == SideMode::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (FillModeA == FillMode::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (DiagTypeA == DiagType::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes TRMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
    // Swap the A and B arguments if A is on the right side (the problem size does not need to change here).
if (kSideMode == SideMode::kRight) {
// Initialize the Params structure
params_ = typename TrmmKernel::Params{
args.swapped_matrices(),
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
// Initialize the Params structure
params_ = typename TrmmKernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(TrmmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename TrmmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<TrmmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<TrmmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
    Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
/********************************************************************************************************
TRMM has 4 combinations based on Layouts {RowMajor, ColumnMajor} x Side mode {LeftSide, RightSide}
    In the templates and arguments to the CUTLASS kernel, `matrix A` is always triangular and `matrix B` is rectangular
    (adhering to the cuBLAS convention).
    For the mainloop and TRMM kernel, `A` and `B` point to the left-side and right-side matrices, respectively.
    Thus, for LeftSide mode, `A` and `B` point to `matrix A` and `matrix B`, respectively, while for
    RightSide mode, `A` and `B` point to `matrix B` and `matrix A`, respectively.
Additionally, CUTLASS GEMM epilogue is always RowMajor, and ColumnMajor output is achieved by
transposing the GEMM problem. Thus, ColumnMajor output layout for TRMM requires:
- Transposing `matrix A` and `matrix B` layouts
- Swapping problem size m and n values
- Swapping LeftSide and RightSide mode
RowMajor output: D = matrix A x matrix B
ColumnMajor output: D = matrix A x matrix B -> Transpose (D) = Transpose(matrix B) x Transpose(matrix A)
    The {RowMajor, ColumnMajor} x {LeftSide, RightSide} combinations give four cases:
1. LeftSide mode and RowMajor output (default template)
2. LeftSide mode and ColumnMajor output
3. RightSide mode and RowMajor output
4. RightSide mode and ColumnMajor output
Mapping ColumnMajor output layout cases 2 and 4 to RowMajor efficient epilogue implementation:
Case 2 -> Case 3:
D_col = matrix A x matrix B (LeftSide mode)
=> Transpose(D_col) = Transpose(matrix B) x Transpose(matrix A) (RightSide mode)
          swap the pointers for `A` and `B`, then call the GEMM mainloop with the RowMajor efficient epilogue
Case 4 -> Case 1:
D_col = matrix B x matrix A (RightSide mode)
=> Transpose(D_col) = Transpose(matrix A) x Transpose(matrix B) (LeftSide mode)
          call the GEMM mainloop with the RowMajor efficient epilogue
********************************************************************************************************/
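
/*
  For example (illustrative only; float operands and kNonUnit are arbitrary choices): a
  column-major-output TRMM declared as

    Trmm<float, layout::ColumnMajor, SideMode::kLeft, FillMode::kLower, DiagType::kNonUnit,
         float, layout::ColumnMajor,
         float, layout::ColumnMajor, ...>

  is handled by the partial specialization below, whose UnderlyingOperator is

    Trmm<float, layout::RowMajor, SideMode::kRight, FillMode::kUpper, DiagType::kNonUnit,
         float, layout::RowMajor,
         float, layout::RowMajor, ...>

  i.e. Case 2 mapped to Case 3: the layouts are transposed, the side and fill modes are flipped,
  and to_underlying_arguments() exchanges the problem-size extents at runtime.
*/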
/// Partial specialization for column-major output exchanges problem size and operand.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A
SideMode SideModeA,
/// Fill Mode for A
FillMode FillModeA,
/// DiagType for A
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K as a serial reduction
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA>
class Trmm<ElementA_, LayoutA_, SideModeA, FillModeA, DiagTypeA,
ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, SplitKSerial,
Operator_, TransformA> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static SideMode const kSideMode = SideModeA;
static FillMode const kFillMode = FillModeA;
static DiagType const kDiagType = DiagTypeA;
// Changing SideMode as we change the layout
static SideMode const kSideModeT = (SideModeA == SideMode::kLeft) ?
SideMode::kRight : SideMode::kLeft;
// Changing FillMode as we change the layout
static FillMode const kFillModeT = (FillModeA == FillMode::kLower) ?
FillMode::kUpper : FillMode::kLower;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static ComplexTransform const kTransformA = TransformA;
  // Complex transform does not apply to B
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static bool const kSplitKSerial = SplitKSerial;
using UnderlyingOperator = Trmm<
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
kSideModeT,
kFillModeT,
kDiagType,
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentA,
kAlignmentB,
kSplitKSerial,
Operator,
TransformA
>;
using Arguments = typename UnderlyingOperator::Arguments;
using TrmmKernel = typename UnderlyingOperator::TrmmKernel;
static int const kAlignmentC = UnderlyingOperator::kAlignmentC;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the TRMM.
Trmm() { }
  /// Helper to construct an equivalent transposed problem for the underlying TRMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem_size();
}
/// Determines whether the TRMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Initializes TRMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/trmm.h/0 | {
"file_path": "include/cutlass/gemm/device/trmm.h",
"repo_id": "include",
"token_count": 9050
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Defines a Stream-K GEMM that can broadcast a bias vector in the epilogue.
Similar structure to DefaultGemmWithBroadcast, but uses its own epilogue
(DefaultStreamkEpilogueWithBroadcastTensorOp) and its own GEMM kernel
(GemmStreamkWithFusedEpilogue).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
///
typename Enable = void
>
struct DefaultGemmStreamkWithBroadcast {
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator
>::GemmKernel;
// Replace epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultStreamkEpilogueWithBroadcastTensorOp<
typename GemmBase::Epilogue::Shape,
typename GemmBase::Epilogue::WarpMmaOperator,
GemmBase::Epilogue::kPartitionsK,
ElementC_,
typename EpilogueOutputOp::ElementT,
typename EpilogueOutputOp::ElementVector,
EpilogueOutputOp,
GemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Compose the GEMM kernel
using GemmKernel = GemmStreamkWithFusedEpilogue<
typename GemmBase::Mma,
Epilogue,
ThreadblockSwizzle
>;
};
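
/*
  Illustrative sketch (the concrete types and shapes below are assumptions chosen for the
  example, and EpilogueOp stands for any functor satisfying the 'EpilogueWithBroadcastOp'
  concept):

    using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmStreamkWithBroadcast<
      cutlass::half_t, cutlass::layout::RowMajor,    cutlass::ComplexTransform::kNone, 8,
      cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
      cutlass::half_t, cutlass::layout::RowMajor,
      float,
      cutlass::arch::OpClassTensorOp,
      cutlass::arch::Sm80,
      cutlass::gemm::GemmShape<128, 128, 32>,
      cutlass::gemm::GemmShape<64, 64, 32>,
      cutlass::gemm::GemmShape<16, 8, 16>,
      EpilogueOp,
      cutlass::gemm::threadblock::ThreadblockSwizzleStreamK,
      4,
      cutlass::arch::OpMultiplyAdd
    >::GemmKernel;
*/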
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h",
"repo_id": "include",
"token_count": 1552
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Planar-complex batched-array GEMM kernel whose per-batch problem sizes and operand
           pointers are read from arrays in device memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmPlanarComplexArray {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using Operator = typename Mma::Operator;
using ArchTag = typename Mma::ArchTag;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value);
//
// Additional types needed for reflection
//
using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::Shape;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue{};
int const *ptr_M{nullptr};
int const *ptr_N{nullptr};
int const *ptr_K{nullptr};
void const * const * ptr_A_real{nullptr};
void const * const * ptr_A_imag{nullptr};
void const * const * ptr_B_real{nullptr};
void const * const * ptr_B_imag{nullptr};
void const * const * ptr_C_real{nullptr};
void const * const * ptr_C_imag{nullptr};
void * const * ptr_D_real{nullptr};
void * const * ptr_D_imag{nullptr};
typename LayoutA::Stride::Index lda_real{};
typename LayoutA::Stride::Index lda_imag{};
typename LayoutB::Stride::Index ldb_real{};
typename LayoutB::Stride::Index ldb_imag{};
typename LayoutC::Stride::Index ldc_real{};
typename LayoutC::Stride::Index ldc_imag{};
typename LayoutC::Stride::Index ldd_real{};
typename LayoutC::Stride::Index ldd_imag{};
//
// Methods
//
Arguments() = default;
/// constructs an arguments structure
    Arguments(
      GemmUniversalMode mode,
      GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
int const *ptr_M,
int const *ptr_N,
int const *ptr_K,
void const * const * ptr_A_real,
void const * const * ptr_A_imag,
void const * const * ptr_B_real,
void const * const * ptr_B_imag,
void const * const * ptr_C_real,
void const * const * ptr_C_imag,
void * const * ptr_D_real,
void * const * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag)
:
      UniversalArgumentsBase(mode, problem_size, batch_count, /*batch_stride_D=*/0),
epilogue(epilogue),
ptr_M(ptr_M),
ptr_N(ptr_N),
ptr_K(ptr_K),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_M, args.ptr_N);
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real{};
typename Mma::IteratorA::Params params_A_imag{};
typename Mma::IteratorB::Params params_B_real{};
typename Mma::IteratorB::Params params_B_imag{};
typename Epilogue::OutputTileIterator::Params params_C_real{};
typename Epilogue::OutputTileIterator::Params params_C_imag{};
typename Epilogue::OutputTileIterator::Params params_D_real{};
typename Epilogue::OutputTileIterator::Params params_D_imag{};
typename EpilogueOutputOp::Params output_op{};
int const *ptr_M{nullptr};
int const *ptr_N{nullptr};
int const *ptr_K{nullptr};
void const * const * ptr_A_real{nullptr};
void const * const * ptr_A_imag{nullptr};
void const * const * ptr_B_real{nullptr};
void const * const * ptr_B_imag{nullptr};
void const * const * ptr_C_real{nullptr};
void const * const * ptr_C_imag{nullptr};
void * const * ptr_D_real{nullptr};
void * const * ptr_D_imag{nullptr};
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
ptr_M(args.ptr_M),
ptr_N(args.ptr_N),
ptr_K(args.ptr_K),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(args.ptr_A_real),
ptr_A_imag(args.ptr_A_imag),
ptr_B_real(args.ptr_B_real),
ptr_B_imag(args.ptr_B_imag),
ptr_C_real(args.ptr_C_real),
ptr_C_imag(args.ptr_C_imag),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag)
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
ptr_M = args.ptr_M;
ptr_N = args.ptr_N;
ptr_K = args.ptr_K;
ptr_A_real = args.ptr_A_real;
ptr_A_imag = args.ptr_A_imag;
ptr_B_real = args.ptr_B_real;
ptr_B_imag = args.ptr_B_imag;
ptr_C_real = args.ptr_C_real;
ptr_C_imag = args.ptr_C_imag;
ptr_D_real = args.ptr_D_real;
ptr_D_imag = args.ptr_D_imag;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
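    // The mainloop and epilogue phases do not run concurrently, so their shared memory
    // requirements are aliased in a union to reduce the kernel's total shared memory footprint.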
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmPlanarComplexArray op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int batch_idx = threadblock_tile_offset.k();
int problem_size_m = params.problem_size.m();
int problem_size_n = params.problem_size.n();
int problem_size_k = params.problem_size.k();
ElementA *ptr_A_real = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_real[batch_idx]));
ElementA *ptr_A_imag = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_imag[batch_idx]));
ElementB *ptr_B_real = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_real[batch_idx]));
ElementB *ptr_B_imag = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_imag[batch_idx]));
//
// If pointers for problem sizes are specified, these are loaded from global memory
//
if (params.ptr_M) {
problem_size_m = params.ptr_M[batch_idx];
}
if (params.ptr_N) {
problem_size_n = params.ptr_N[batch_idx];
}
if (params.ptr_K) {
problem_size_k = params.ptr_K[batch_idx];
}
int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM;
int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN;
int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
//
// Each threadblock loops over the logical problem size which the kernel may have discovered
// after the grid is launched.
//
CUTLASS_PRAGMA_NO_UNROLL
for (int block_m = threadblock_tile_offset.m();
block_m < kBlockCountM;
block_m += params.grid_tiled_shape.m()) {
CUTLASS_PRAGMA_NO_UNROLL
for (int block_n = threadblock_tile_offset.n();
block_n < kBlockCountN;
block_n += params.grid_tiled_shape.n()) {
//
// Compute indices within threadblock and warp.
//
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Proceed with regular GEMM logic.
//
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0};
cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN };
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A_real(
params.params_A_real,
ptr_A_real,
{problem_size_m, problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorA iterator_A_imag(
params.params_A_imag,
ptr_A_imag,
{problem_size_m, problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B_real(
params.params_B_real,
ptr_B_real,
{problem_size_k, problem_size_n},
thread_idx,
tb_offset_B);
typename Mma::IteratorB iterator_B_imag(
params.params_B_imag,
ptr_B_imag,
{problem_size_k, problem_size_n},
thread_idx,
tb_offset_B);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(
kGemmKIterations,
accumulators,
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
        // Assume identity swizzle
MatrixCoord threadblock_offset(
block_m * Mma::Shape::kM,
block_n * Mma::Shape::kN
);
ElementC *ptr_C_real = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_real[batch_idx]));
ElementC *ptr_C_imag = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_imag[batch_idx]));
ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real[batch_idx]);
ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag[batch_idx]);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C_real(
params.params_C_real,
ptr_C_real,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params.params_C_imag,
ptr_C_imag,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D_real(
params.params_D_real,
ptr_D_real,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params.params_D_imag,
ptr_D_imag,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
//
// Construct epilogue
//
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
} // for block_n
} // for block_m
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_planar_complex_array.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_planar_complex_array.h",
"repo_id": "include",
"token_count": 7703
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of universal GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace util {
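
/// Returns true if the GEMM K extent is the contiguous (unit-stride) dimension of either operand
/// and is a multiple of that operand's alignment: row-major A is contiguous along K, as is
/// column-major B.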
template <class LayoutA, class LayoutB>
CUTLASS_HOST_DEVICE
static bool
is_continous_k_aligned(GemmCoord problem_size, size_t alignmentA, size_t alignmentB) {
return (platform::is_same<LayoutA, layout::RowMajor>::value && (problem_size.k() % alignmentA) == 0) ||
(platform::is_same<LayoutB, layout::ColumnMajor>::value && (problem_size.k() % alignmentB) == 0);
}
} // namespace util
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument structure
struct UniversalArgumentsBase
{
//
// Data members
//
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
GemmCoord problem_size{};
int batch_count{1};
int64_t batch_stride_D{0};
//
// Methods
//
UniversalArgumentsBase() = default;
/// constructs an arguments structure
UniversalArgumentsBase(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
int64_t batch_stride_D)
:
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
batch_stride_D(batch_stride_D)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
};
/// Parameters structure
template <
typename ThreadblockSwizzle,
typename ThreadblockShape,
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutA,
typename LayoutB>
struct UniversalParamsBase
{
//
// Data members
//
GemmCoord problem_size{};
GemmCoord grid_tiled_shape{};
int swizzle_log_tile{0};
GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm;
int batch_count {0};
int gemm_k_size {0};
int64_t batch_stride_D {0};
int *semaphore = nullptr;
//
// Host dispatch API
//
/// Default constructor
UniversalParamsBase() = default;
/// Constructor
UniversalParamsBase(
UniversalArgumentsBase const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
problem_size(args.problem_size),
mode(args.mode),
batch_count(args.batch_count),
batch_stride_D(args.batch_stride_D),
semaphore(nullptr)
{
init_grid_tiled_shape();
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = 0;
if (mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes =
sizeof(ElementC) *
size_t(batch_stride_D) *
size_t(grid_tiled_shape.k());
}
else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1)
{
// Serial split-K only requires a temporary workspace if the number of partitions along the
// GEMM K dimension is greater than one.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
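    // Worked example (illustrative numbers): a 1024x1024 problem tiled by 128x128 threadblocks
    // gives grid_tiled_shape.m() == grid_tiled_shape.n() == 8, so serial split-K reserves
    // 8 * 8 * sizeof(int) == 256 bytes of semaphore workspace, independent of the split count.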
return workspace_bytes;
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
semaphore = static_cast<int *>(workspace);
// Zero-initialize entire workspace
if (semaphore)
{
size_t workspace_bytes = get_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << workspace_bytes << " workspace bytes");
cudaError_t result = cudaMemsetAsync(
semaphore,
0,
workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
GemmCoord get_tiled_shape() const
{
return grid_tiled_shape;
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
}
private:
CUTLASS_HOST_DEVICE
void init_grid_tiled_shape() {
// Get GEMM volume in thread block tiles
grid_tiled_shape = ThreadblockSwizzle::get_tiled_shape(
problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
batch_count);
swizzle_log_tile = ThreadblockSwizzle::get_log_tile(grid_tiled_shape);
// Determine extent of K-dimension assigned to each block
gemm_k_size = problem_size.k();
if (mode == GemmUniversalMode::kGemm || mode == GemmUniversalMode::kGemmSplitKParallel)
{
static const uint32_t CACHELINE_BYTES = 128;
static const size_t element_bytes_a = sizeof(ElementA);
static const size_t element_bytes_b = sizeof(ElementB);
static const size_t cacheline_elements_a = CACHELINE_BYTES / element_bytes_a;
static const size_t cacheline_elements_b = CACHELINE_BYTES / element_bytes_b;
const bool cacheline_alignment_needed =
util::is_continous_k_aligned<LayoutA, LayoutB>(problem_size, cacheline_elements_a, cacheline_elements_b);
int const kAlignK = const_max(
const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value),
cacheline_alignment_needed ? const_max(cacheline_elements_a, cacheline_elements_b) : 1);
gemm_k_size = round_up(ceil_div(problem_size.k(), batch_count), kAlignK);
if (gemm_k_size) {
grid_tiled_shape.k() = ceil_div(problem_size.k(), gemm_k_size);
}
}
}
};
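/* Editor's sketch (hedged; not part of the original header). The split-K sizing performed
   in init_grid_tiled_shape() can be reproduced standalone; kAlignK = 32 below is an
   illustrative value (the real value depends on element widths and cache-line alignment):

     #include <cstdio>
     int ceil_div(int a, int b)  { return (a + b - 1) / b; }
     int round_up(int a, int b)  { return ceil_div(a, b) * b; }
     int main() {
       int problem_k = 4096, k_slices = 3, kAlignK = 32;
       int gemm_k_size = round_up(ceil_div(problem_k, k_slices), kAlignK);  // 1376
       int grid_k      = ceil_div(problem_k, gemm_k_size);                  // 3
       std::printf("gemm_k_size=%d grid_k=%d\n", gemm_k_size, grid_k);
       return 0;
     }
*/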
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/params_universal_base.h/0 | {
"file_path": "include/cutlass/gemm/kernel/params_universal_base.h",
"repo_id": "include",
"token_count": 2874
} | 39 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/fast_math.h"
#include "cutlass/gemm_coord.hpp"
#include "cutlass/kernel_hardware_info.hpp"
#include "cutlass/gemm/kernel/tile_scheduler_params.h"
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/arch/cluster_sm90.hpp"
namespace cutlass::gemm::kernel::detail {
///////////////////////////////////////////////////////////////////////////////
// Persistent Thread Block (TB) scheduler
template <class GroupProblemShape>
class PersistentTileSchedulerSm90Group {
//
// Data members
//
private:
uint64_t current_work_linear_idx_ = 0;
uint64_t total_grid_size_ = 0;
// Tracking current group, its starting linear idx and total tiles
struct GroupInfo {
int group_idx = 0;
uint64_t start_linear_idx = 0;
uint64_t total_tiles = 0;
} current_group_info_;
public:
struct WorkTileInfo {
int32_t M_idx = 0;
int32_t N_idx = 0;
int32_t L_idx = 0;
bool is_valid_tile = false;
CUTLASS_HOST_DEVICE
bool
is_valid() const {
return is_valid_tile;
}
CUTLASS_HOST_DEVICE
static WorkTileInfo
invalid_work_tile() {
return {-1, -1, -1, false};
}
CUTLASS_HOST_DEVICE
bool
is_final_split(uint32_t k_tiles_per_output_tile) const {
return true;
}
CUTLASS_HOST_DEVICE
int32_t
reduction_subtile_idx() const {
return -1;
}
};
using ProblemShape = typename GroupProblemShape::UnderlyingProblemShape;
using Params = PersistentTileSchedulerSm90GroupParams<ProblemShape>;
using RasterOrder = typename Params::RasterOrder;
using RasterOrderOptions = typename Params::RasterOrderOptions;
struct Arguments {
int max_swizzle_size = 1;
    // Heuristics are not applied for grouped problems, since the largest dimension can change per group
RasterOrderOptions raster_order = RasterOrderOptions::AlongM;
};
// Sink scheduler params as a member
Params scheduler_params;
//
// Methods
//
template <class TileShape, class ClusterShape>
static Params
to_underlying_arguments(
GroupProblemShape problem_shapes,
TileShape tile_shape,
ClusterShape cluster_shape,
KernelHardwareInfo const& hw_info,
Arguments const& arguments,
[[maybe_unused]] void* workspace=nullptr,
[[maybe_unused]] const uint32_t epilogue_subtile = 1) {
// We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic
static_assert(cute::is_static<TileShape>::value);
static_assert(cute::is_static<ClusterShape>::value);
dim3 problem_blocks = get_tiled_cta_shape_mnl(
problem_shapes.groups(),
problem_shapes,
hw_info,
tile_shape, cluster_shape);
Params params;
params.initialize(
problem_blocks,
problem_shapes.groups(),
problem_shapes.problem_shapes,
problem_shapes.host_problem_shapes,
to_gemm_coord(tile_shape),
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order
);
return params;
}
// Given the inputs, computes the physical grid we should launch.
template<class TileShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_grid_shape(
GroupProblemShape problem_shapes,
TileShape tile_shape,
ClusterShape cluster_shape,
KernelHardwareInfo hw_info,
Arguments arguments,
bool truncate_by_problem_size=true) {
dim3 problem_blocks = get_tiled_cta_shape_mnl(
problem_shapes.groups(),
problem_shapes,
hw_info,
tile_shape, cluster_shape);
return Params::get_grid_shape(
problem_blocks,
to_gemm_coord(cluster_shape),
hw_info,
arguments.max_swizzle_size,
arguments.raster_order,
/* truncate_by_problem_size = */true
);
}
// Given the inputs, computes the total number of output blocks this problem will compute over
// Note that this is only the logical size of our grid, not the physical grid we will actually launch.
template<class BlockShape, class ClusterShape>
CUTLASS_HOST_DEVICE static
dim3
get_tiled_cta_shape_mnl(int groups, GroupProblemShape problem_shapes, KernelHardwareInfo hw_info, BlockShape cta_shape, ClusterShape cluster_shape) {
uint32_t total_ctas = 0;
uint32_t cta_in_N_dim = 1; // We linearize the blocks across all the problems here
    // If host problem shapes are not provided, conservatively fall back to the device's SM count.
if (!problem_shapes.is_host_problem_shape_available()) {
total_ctas = hw_info.sm_count;
}
    // If host problem shapes are provided, make a better decision about whether a smaller grid can be launched.
else {
for (int group = 0; group < groups; group++) {
auto ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes.get_host_problem_shape(group)), cute::shape<0>(cta_shape)));
auto ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes.get_host_problem_shape(group)), cute::shape<1>(cta_shape)));
auto problem_blocks_m = round_up(ctas_along_m, cute::get<0>(cluster_shape));
auto problem_blocks_n = round_up(ctas_along_n, cute::get<1>(cluster_shape));
total_ctas += problem_blocks_m * problem_blocks_n;
}
}
return Params::get_tiled_cta_shape_mnl(
to_gemm_coord(cluster_shape),
total_ctas, cta_in_N_dim
);
}
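// Editor's note (hedged, worked example): with a 128x128 CTA tile and a (2,1) cluster,
// two groups sized 512x384xK and 300x256xK contribute
//   group 0: ceil(512/128) = 4 CTAs in M (already a multiple of 2), ceil(384/128) = 3 in N -> 12 CTAs
//   group 1: ceil(300/128) = 3 -> rounded up to 4 in M, ceil(256/128) = 2 in N              ->  8 CTAs
// for total_ctas = 20; without host problem shapes the scheduler conservatively uses sm_count.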
CUTLASS_HOST_DEVICE
static bool
can_implement(Arguments const& args) {
return true;
}
PersistentTileSchedulerSm90Group() = default;
CUTLASS_DEVICE explicit PersistentTileSchedulerSm90Group(Params const& params_) : scheduler_params(params_) {
// MSVC requires protecting use of CUDA-specific nonstandard syntax,
// like blockIdx and gridDim, with __CUDA_ARCH__.
#if defined(__CUDA_ARCH__)
if (scheduler_params.raster_order_ == RasterOrder::AlongN) {
current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x);
}
else {
current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y);
}
total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z);
uint64_t ctas_along_m, ctas_along_n;
if (is_tuple<decltype(cute::shape<0>(params_.problem_shapes_[0]))>::value ||
is_tuple<decltype(cute::shape<1>(params_.problem_shapes_[0]))>::value) {
ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(params_.problem_shapes_[0]), scheduler_params.cta_shape_.m()));
ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(params_.problem_shapes_[0]), scheduler_params.cta_shape_.n()));
}
else {
ctas_along_m = scheduler_params.divmod_cta_shape_m_.divide(cute::shape<0>(params_.problem_shapes_[0]) + scheduler_params.divmod_cta_shape_m_.divisor - 1);
ctas_along_n = scheduler_params.divmod_cta_shape_n_.divide(cute::shape<1>(params_.problem_shapes_[0]) + scheduler_params.divmod_cta_shape_n_.divisor - 1);
}
auto problem_blocks_m = round_up(ctas_along_m, (1 << params_.log_swizzle_size_) * params_.cluster_shape_.m());
auto problem_blocks_n = round_up(ctas_along_n, (1 << params_.log_swizzle_size_) * params_.cluster_shape_.n());
current_group_info_.total_tiles = problem_blocks_m * problem_blocks_n;
#else
CUTLASS_ASSERT(false && "This line should never be reached");
#endif
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work() {
return get_current_work_for_linear_idx(current_work_linear_idx_);
}
CUTLASS_DEVICE
WorkTileInfo
get_current_work_for_linear_idx(uint64_t linear_idx) {
if (scheduler_params.pre_processed_problem_shapes && linear_idx >= scheduler_params.blocks_across_problem_) {
return WorkTileInfo::invalid_work_tile();
}
return get_work_idx_m_and_n(linear_idx,
current_group_info_,
scheduler_params.groups_,
scheduler_params.problem_shapes_,
scheduler_params.cta_shape_,
scheduler_params.cluster_shape_,
scheduler_params.divmod_cluster_shape_major_,
scheduler_params.divmod_cluster_shape_minor_,
scheduler_params.divmod_cta_shape_m_,
scheduler_params.divmod_cta_shape_n_,
scheduler_params.log_swizzle_size_,
scheduler_params.raster_order_);
}
CUTLASS_DEVICE
void
advance_to_next_work(uint32_t advance_count = 1) {
current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count);
}
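// Editor's note (hedged, illustrative): with a persistent grid of 132 CTAs
// (total_grid_size_ == 132), the CTA that starts at linear index 5 visits work
// tiles 5, 137, 269, ... until the index runs past the last group's tiles.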
// get work_idx_m, work_idx_n from linear_idx while applying swizzle
static CUTLASS_DEVICE
WorkTileInfo
get_work_idx_m_and_n(
uint64_t linear_idx,
struct GroupInfo& group_info,
int32_t total_problem_groups,
ProblemShape* problem_shapes,
GemmCoord cta_shape,
GemmCoord cluster_shape,
FastDivmodU64Pow2 const& divmod_cluster_shape_major,
FastDivmodU64Pow2 const& divmod_cluster_shape_minor,
FastDivmodU64 const& divmod_cta_shape_m,
FastDivmodU64 const& divmod_cta_shape_n,
int32_t log_swizzle_size,
RasterOrder raster_order) {
bool valid_tile = true;
uint64_t ctas_along_m, ctas_along_n;
if (is_tuple<decltype(cute::shape<0>(problem_shapes[group_info.group_idx]))>::value ||
is_tuple<decltype(cute::shape<1>(problem_shapes[group_info.group_idx]))>::value) {
ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes[group_info.group_idx]), cta_shape.m()));
ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes[group_info.group_idx]), cta_shape.n()));
}
else {
ctas_along_m = divmod_cta_shape_m.divide(cute::shape<0>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_m.divisor - 1);
ctas_along_n = divmod_cta_shape_n.divide(cute::shape<1>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_n.divisor - 1);
}
auto problem_blocks_m = round_up(ctas_along_m, (1 << log_swizzle_size) * cluster_shape.m());
auto problem_blocks_n = round_up(ctas_along_n, (1 << log_swizzle_size) * cluster_shape.n());
group_info.total_tiles = problem_blocks_m * problem_blocks_n;
while (group_info.start_linear_idx + group_info.total_tiles <= linear_idx) {
group_info.group_idx++;
if (group_info.group_idx >= total_problem_groups)
return WorkTileInfo::invalid_work_tile();
group_info.start_linear_idx += group_info.total_tiles;
if (is_tuple<decltype(cute::shape<0>(problem_shapes[group_info.group_idx]))>::value ||
is_tuple<decltype(cute::shape<1>(problem_shapes[group_info.group_idx]))>::value) {
ctas_along_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shapes[group_info.group_idx]), cta_shape.m()));
ctas_along_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shapes[group_info.group_idx]), cta_shape.n()));
}
else {
ctas_along_m = divmod_cta_shape_m.divide(cute::shape<0>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_m.divisor - 1);
ctas_along_n = divmod_cta_shape_n.divide(cute::shape<1>(problem_shapes[group_info.group_idx]) + divmod_cta_shape_n.divisor - 1);
}
problem_blocks_m = round_up(ctas_along_m, (1 << log_swizzle_size) * cluster_shape.m());
problem_blocks_n = round_up(ctas_along_n, (1 << log_swizzle_size) * cluster_shape.n());
group_info.total_tiles = problem_blocks_m * problem_blocks_n;
}
uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0;
uint64_t blk_per_grid_dim = divmod_cluster_shape_minor.divide(linear_idx - group_info.start_linear_idx);
divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim);
auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster();
if (raster_order == RasterOrder::AlongN) {
cluster_minor_offset = cta_m_in_cluster;
}
else {
cluster_minor_offset = cta_n_in_cluster;
}
uint64_t cluster_idx_minor, cluster_idx_major;
uint64_t cluster_idx_minor_div_swizzle, extra, offset;
offset = cluster_id & ((1 << log_swizzle_size) - 1);
extra = cluster_id >> log_swizzle_size;
uint64_t curr_group_cluster_blk_major;
if (raster_order == RasterOrder::AlongN) {
curr_group_cluster_blk_major = divmod_cluster_shape_major.divide(problem_blocks_n);
}
else {
curr_group_cluster_blk_major = divmod_cluster_shape_major.divide(problem_blocks_m);
}
cluster_idx_minor_div_swizzle = extra / curr_group_cluster_blk_major;
cluster_idx_major = extra % curr_group_cluster_blk_major;
cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset;
auto minor_work_idx = static_cast<int32_t>(cluster_idx_minor * divmod_cluster_shape_minor.divisor +
cluster_minor_offset);
auto major_work_idx = static_cast<int32_t>(cluster_idx_major * divmod_cluster_shape_major.divisor +
cluster_major_offset);
if (raster_order == RasterOrder::AlongN) {
return {minor_work_idx, major_work_idx, group_info.group_idx, valid_tile};
}
else {
return {major_work_idx, minor_work_idx, group_info.group_idx, valid_tile};
}
}
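/* Editor's sketch (hedged; not part of the original header). The swizzle decomposition
   above maps consecutive cluster ids onto bands that are (1 << log_swizzle_size) clusters
   wide along the minor dimension. For log_swizzle_size = 1 and 4 cluster columns along
   the major dimension (curr_group_cluster_blk_major = 4):

     int log_swizzle = 1, blk_major = 4;
     for (int id = 0; id < 10; ++id) {
       int offset = id & ((1 << log_swizzle) - 1);
       int extra  = id >> log_swizzle;
       int major  = extra % blk_major;
       int minor  = (extra / blk_major) * (1 << log_swizzle) + offset;
       // id: 0     1     2     3     4     5     6     7     8     9
       // ->  (0,0) (0,1) (1,0) (1,1) (2,0) (2,1) (3,0) (3,1) (0,2) (0,3)   as (major, minor)
     }
*/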
// Returns whether the block assigned this work should compute the epilogue for the corresponding
// output tile. For the basic tile scheduler, this is always true.
CUTLASS_HOST_DEVICE
static bool
compute_epilogue(WorkTileInfo const&, Params const&) {
return true;
}
// Performs the reduction across splits for a given output tile. Since this scheduler does
// not split output tiles, no reduction is needed.
template <class FrgTensorC>
CUTLASS_DEVICE
static void
fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {}
// Returns whether the current WorkTileInfo passed in should continue to be used. Since
// this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo
// passed in should not be used after having been processed.
CUTLASS_DEVICE
static bool
continue_current_work(WorkTileInfo&) {
return false;
}
// The basic tile scheduler does not require any additional workspace
template <class ProblemShape, class ElementAccumulator>
static size_t
get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t, const uint32_t = 1) {
return 0;
}
template <class ProblemShape, class ElementAccumulator>
static cutlass::Status
initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&,
uint32_t, const uint32_t = 1) {
return Status::kSuccess;
}
template <class ProblemShape_MNKL, class TileShape>
CUTLASS_HOST_DEVICE
static int
get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape_MNKL problem_shape, TileShape tile_shape) {
// All work units returned by this scheduler cover the entire K iteration
// space of the output tile assigned to the work unit.
return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape)));
}
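// Editor's note (hedged, illustrative): e.g. a problem with K = 4096 and a CTA tile
// with K = 64 yields ceil(4096 / 64) = 64 K tiles, all owned by the same work unit.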
CUTLASS_HOST_DEVICE
static uint32_t
get_work_k_tile_start(WorkTileInfo const&) {
// All work units returned by this scheduler start from K tile 0
return 0u;
}
CUTLASS_DEVICE
static bool
need_separate_reduction(Params const& params) {
return false;
}
CUTLASS_DEVICE
bool
is_work_tile_for_reduction(WorkTileInfo const& work_tile_info, Params const& params) {
return false;
}
CUTLASS_DEVICE
uint32_t
epilgoue_subtile_idx(WorkTileInfo const& work_tile_info, Params const& params) const {
return 0;
}
template <class FrgTensorC>
CUTLASS_DEVICE
void
separate_reduction(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
}
// Shares the accumulator set with peers in the global workspace
template <class FrgTensorC>
CUTLASS_DEVICE
static void
share(
Params const& params,
WorkTileInfo const& work_tile_info,
FrgTensorC& accumulators,
uint32_t num_barriers,
uint32_t barrier_idx) {
}
CUTLASS_DEVICE
static bool
valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) {
return true;
}
CUTLASS_DEVICE
static bool
requires_separate_reduction(Params const& params) {
return false;
}
};
} // namespace cutlass::gemm::kernel::detail
| include/cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp",
"repo_id": "include",
"token_count": 7408
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for selecting the default threadblock-scoped matrix multiply. Batching and split-K are not handled at this level.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/permute.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Gather operand A by using an index array
bool GatherA = false,
/// Gather operand B by using an index array
bool GatherB = false,
/// Permute operand A
typename PermuteALayout = layout::NoPermute,
/// Permute operand B
typename PermuteBLayout = layout::NoPermute
>
struct DefaultMma;
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass Simt)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"simt epilogue must be row major");
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassSimt, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
GatherB, PermuteBLayout>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClear,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
GatherB, PermuteBLayout>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp): float operands computed via fast F16 multiply-add (arch::OpMultiplyAddFastF16)
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<float, LayoutA, kAlignmentA, float, LayoutB,
kAlignmentB, float, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float,
LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA,
GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB,
GatherB, PermuteBLayout>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, float,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2,
Operator, true, SharedMemoryClearOption::kNone, false, false,
layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
"Alignment must match thread data map's vector length");
  static_assert(kAlignmentB == 128 / sizeof_bits<ElementB>::value,
"Alignment must match thread data map's vector length");
// Define iterators over tiles from the A operand
using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA,
LayoutA, 1, typename MmaCore::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB,
LayoutB, 0, typename MmaCore::IteratorThreadMapB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>,
typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false, SharedMemoryClearOption::kNone,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"simt epilogue must be row major");
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassSimt,
Stages, Operator>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, LayoutC,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Permute operand A
typename PermuteALayout,
/// Permute operand B
typename PermuteBLayout
>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false, SharedMemoryClear,
GatherA, GatherB, PermuteALayout, PermuteBLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"simt epilogue must be row major");
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA, GatherA, PermuteALayout>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB, GatherB, PermuteBLayout>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, LayoutC,
typename MmaCore::MmaPolicy, Stages, SharedMemoryClear>;
};
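/* Editor's sketch (hedged; not part of the original header). An illustrative instantiation
   that resolves to the multistage TensorOp specialization above; the tile sizes and SM80
   target are assumptions chosen for a common FP16 configuration, not a recommendation:

     using DefaultMmaSm80 = cutlass::gemm::threadblock::DefaultMma<
         cutlass::half_t, cutlass::layout::RowMajor,    8,   // A operand, 128-bit accesses
         cutlass::half_t, cutlass::layout::ColumnMajor, 8,   // B operand, 128-bit accesses
         float, cutlass::layout::RowMajor,                   // accumulator type / C layout
         cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
         cutlass::gemm::GemmShape<128, 128, 32>,             // threadblock tile
         cutlass::gemm::GemmShape<64, 64, 32>,               // warp tile
         cutlass::gemm::GemmShape<16, 8, 16>,                // instruction tile
         3,                                                  // multistage stages
         cutlass::arch::OpMultiplyAdd>;

     using ThreadblockMma = DefaultMmaSm80::ThreadblockMma;
*/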
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator, true, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for SIMT IDP4A Kernels
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Operation performed by GEMM
typename Operator,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape>
struct DefaultMma<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, 2,
Operator, false, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
static const bool transposeA = platform::is_same< LayoutA, layout::ColumnMajor >::value;
static const bool transposeB = platform::is_same< LayoutB, layout::RowMajor >::value;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
OperatorClass, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// Specialization for Wmma TensorOp operator with 2 staged pipeline
template <
///< Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for Wmma TensorOp operator with 1 staged pipeline
template <
///< Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 1, Operator, false, SharedMemoryClearOption::kNone,
false, false, layout::NoPermute, layout::NoPermute> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 1, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped singlestage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/default_mma.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma.h",
"repo_id": "include",
"token_count": 12168
} | 41 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from
/// threadblock tile size, global memory data layout, and target math
/// instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator = arch::OpMultiplyAddComplex,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global>
struct DefaultMultistageMmaComplexCore;
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h",
"repo_id": "include",
"token_count": 1405
} | 42 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages
int Stages,
/// Transformation applied to A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexPipelined :
public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using ArchTag = typename Policy::Operator::ArchTag;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
/// Transformation applied to A
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B
static ComplexTransform const kTransformB = TransformB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = ArrayPlanarComplex<
typename Policy::Operator::FragmentC::Element,
Policy::Operator::FragmentC::kElements
>;
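// The accumulator is held in planar form: FragmentC::real is a contiguous array of real
// parts and FragmentC::imag a contiguous array of imaginary parts, matching the
// real-valued fragments produced by the underlying warp-level MMA.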
/// Warp-level Mma
using Operator = typename Policy::Operator;
private:
using FragmentA = typename IteratorA::Fragment;
using FragmentB = typename IteratorB::Fragment;
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPlanarComplexPipelined(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
private:
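// The complex product is expanded into four real-valued warp MMAs per accumulator tile,
// following (a + bi)(c + di) = (ac - bd) + (ad + bc)i. Conjugating an operand flips the
// sign of its imaginary part; the XOR test below therefore adds +imag_A * imag_B to the
// real accumulator when exactly one operand is conjugated and -imag_A * imag_B otherwise.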
CUTLASS_DEVICE
void warp_mma_planar_complex(
Operator & warp_mma,
FragmentC &accum,
WarpFragmentA const & real_A,
WarpFragmentA const & imag_A,
WarpFragmentB const & real_B,
WarpFragmentB const & imag_B) {
cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;
WarpFragmentB neg_real_B = neg_op_B(real_B);
WarpFragmentB neg_imag_B = neg_op_B(imag_B);
warp_mma(accum.real, real_A, real_B, accum.real);
if (kTransformB == ComplexTransform::kNone) {
warp_mma(accum.imag, real_A, imag_B, accum.imag);
}
else {
warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone) {
warp_mma(accum.imag, imag_A, real_B, accum.imag);
}
else {
warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone ^ kTransformB == ComplexTransform::kNone) {
warp_mma(accum.real, imag_A, imag_B, accum.real);
}
else {
warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
}
}
public:
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A_real,
///< iterator over A operand in global memory
IteratorA iterator_A_imag,
///< iterator over B operand in global memory
IteratorB iterator_B_real,
///< iterator over B operand in global memory
IteratorB iterator_B_imag,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A_real;
FragmentA tb_frag_A_imag;
FragmentB tb_frag_B_real;
FragmentB tb_frag_B_imag;
tb_frag_A_real.clear();
tb_frag_A_imag.clear();
tb_frag_B_real.clear();
tb_frag_B_imag.clear();
// The last k-block is loaded in the prologue
iterator_A_real.load(tb_frag_A_real);
iterator_A_imag.load(tb_frag_A_imag);
iterator_B_real.load(tb_frag_B_real);
iterator_B_imag.load(tb_frag_B_imag);
++iterator_A_real;
++iterator_A_imag;
++iterator_B_real;
++iterator_B_imag;
this->smem_iterator_A_.store(tb_frag_A_real);
this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);
this->smem_iterator_B_.store(tb_frag_B_real);
this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_real_A[2];
WarpFragmentA warp_frag_imag_A[2];
WarpFragmentB warp_frag_real_B[2];
WarpFragmentB warp_frag_imag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
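// Shared memory is double buffered: global->shared stores and shared->register loads
// target alternate stages of a circular buffer. smem_write_stage_idx toggles each time a
// stage is filled; on wrap-around either the shared-memory write iterators or the warp
// tile iterators are rewound to the start of the buffer (see the stage swap in the
// mainloop below).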
// Avoid reading out of bounds
iterator_A_real.clear_mask(gemm_k_iterations <= 1);
iterator_A_imag.clear_mask(gemm_k_iterations <= 1);
iterator_B_real.clear_mask(gemm_k_iterations <= 1);
iterator_B_imag.clear_mask(gemm_k_iterations <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
// shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(tb_frag_A_real);
this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);
this->smem_iterator_B_.store(tb_frag_B_real);
this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);
__syncthreads();
++this->smem_iterator_B_;
++this->smem_iterator_A_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A_real.load(tb_frag_A_real);
iterator_A_imag.load(tb_frag_A_imag);
iterator_B_real.load(tb_frag_B_real);
iterator_B_imag.load(tb_frag_B_imag);
++iterator_A_real;
++iterator_A_imag;
++iterator_B_real;
++iterator_B_imag;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A_real.clear_mask(gemm_k_iterations <= 2);
iterator_A_imag.clear_mask(gemm_k_iterations <= 2);
iterator_B_real.clear_mask(gemm_k_iterations <= 2);
iterator_B_imag.clear_mask(gemm_k_iterations <= 2);
}
warp_mma_planar_complex(
warp_mma,
accum,
warp_frag_real_A[warp_mma_k % 2],
warp_frag_imag_A[warp_mma_k % 2],
warp_frag_real_B[warp_mma_k % 2],
warp_frag_imag_B[warp_mma_k % 2]);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h",
"repo_id": "include",
"token_count": 5667
} | 43 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/functional.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
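// UnpackComplexConvertAndPackForMma takes a fragment of complex<RealElement> values,
// converts the real and imaginary parts to the instruction's element type with the
// requested rounding, applies the conjugation transform, and packs the results in planar
// form: the first half of the destination fragments holds real parts and the second half
// holds imaginary parts, ready for the real-valued mma instructions.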
template <
/// Data type of real & imag members of complex numbers in the SourceFragment
typename RealElement,
/// Destination fragment required by the mma operation
typename DestinationFragment,
/// Source fragment holding complex<RealElement> elements
typename SourceFragment,
/// Number of mma operations performed
typename MmaIterations,
/// Shape of operand elements
typename MmaOperandShape,
/// Complex transform on A operand
ComplexTransform Transform_,
/// Operand A or Operand B
Operand Operand_,
/// Floating-point rounding style
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma;
// Partial specialization for OperandA and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kA,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kA;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
// Numeric converter MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::ColumnMajor;
static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kRow; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r + i * MmaOperandShape::kRow;
int col = c;
// Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kRow][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
// Partial specialization for OperandB and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kB,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kB;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
// Numeric converter MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::RowMajor;
static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_HOST_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kColumn; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r;
int col = c + i * MmaOperandShape::kColumn;
// Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kColumn][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Do source operands need more than one elements
bool GeneralizedOperatorElements = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(MmaOperandA::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the A operand."
"We can geneneralize later.");
static_assert(MmaOperandB::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the B operand."
"We can geneneralize later.");
D = C;
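    // D is addressed as 2 * MmaIterations::kCount real-valued MmaOperandC fragments: the
    // first MmaIterations::kCount hold the real parts (indexed m + n * kRow) and the
    // second MmaIterations::kCount hold the imaginary parts.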
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = A[m].real();
operand_B[0] = B[n].real();
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = A[m].real();
operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.real(), -a.imag(), b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
// A imaginary part is intentionally negated
operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? A[m].imag() : -A[m].imag());
operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? -A[m].imag() : A[m].imag());
operand_B[0] = B[n].real();
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<float>
// Rounding: float -> tfloat32_t (round half_ulp_truncate nearest)
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = float;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = float;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = float;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = typename arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of complex product operations performed (one complex product needs four mma instructions)
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value,
"This implementation only supports mma.m16n8k8 math instructions.");
static_assert(InstMmaOperandA::kElements == 4,
"This implementation only supports math instructions in which exactly four element is needed for the A operand."
"We can geneneralize later.");
static_assert(InstMmaOperandB::kElements == 2,
"This implementation only supports math instructions in which exactly two element is needed for the B operand."
"We can geneneralize later.");
// Instruction Operands A & B holding real part followed by imaginary part for mma operations
InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&A);
InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&B);
//
// Accumulate in place
//
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m], operand_B[n], *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum);
}
// mma(accum.real(), a.imag(), -b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// negate OperandB to accumulate -(a.imag()*b.imag())
        // negating OperandB emits fewer instructions than negating OperandA, as OperandB has fewer elements
negate<InstMmaOperandB> negate_op;
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
// Alias types for underlying real-valued matrix multiply operator
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
//
// Define conversions from source type to instruction operands' type
//
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
FloatRoundStyle const kRoundA = FloatRoundStyle::round_to_nearest;
FloatRoundStyle const kRoundB = FloatRoundStyle::round_to_nearest;
#else
FloatRoundStyle const kRoundA = FloatRoundStyle::round_half_ulp_trunc_dntz;
FloatRoundStyle const kRoundB = FloatRoundStyle::round_half_ulp_trunc_dntz;
#endif
detail::UnpackComplexConvertAndPackForMma <
RealElementA,
InstMmaOperandA,
FragmentA,
MmaIterations,
MatrixShape<2, 2>,
kTransformA,
Operand::kA,
kRoundA> convert_A;
detail::UnpackComplexConvertAndPackForMma <
RealElementB,
InstMmaOperandB,
FragmentB,
MmaIterations,
MatrixShape<2, 1>,
kTransformB,
Operand::kB,
kRoundB> convert_B;
// Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element
convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A);
convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<double>
// Math instruction: mma.sync.aligned.m16n8k4.f64.f64.f64.f64
// Output data type: complex<double>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<double>,
LayoutA_,
complex<double>,
LayoutB_,
complex<double>,
LayoutC_,
Policy_,
TransformA,
TransformB,
true> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = double;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = double;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = double;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = typename arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
-B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.real(), -a.imag(), b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
// A imaginary part is intentionally negated
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
A[m*MmaOperandA::kElements + mk].imag() : -A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
-B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
-A[m*MmaOperandA::kElements + mk].imag() : A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_complex_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_complex_tensor_op.h",
"repo_id": "include",
"token_count": 13631
} | 44 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/// Tile access iterator
/// Each access iteration within the tile yields the multiplicand fragment for one
/// warp-level matrix multiplication.
template <
/// Size of the tile (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Enable Residual Support
bool EnableResidual = false,
/// Number of partitions along K dimension
int PartitionsK_ = 1
>
class MmaTensorOpMultiplicandTileAccessIterator {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn
>;
static int const kIterations = (kOperand == Operand::kA) ?
InstructionCount::kColumn : InstructionCount::kRow;
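// The iterator steps through the K dimension: columns of the tile for operand A, rows
// for operand B.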
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA) ?
(Shape::kRow * InstructionShape::kColumn / kThreads) :
(Shape::kColumn * InstructionShape::kRow / kThreads)
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to load residual tile
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
/// Iterations in a tile
int iterations_;
public:
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), is_residual_(false), iterations_(0) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
if(EnableResidual) {
// compute residual offset
if (kOperand == Operand::kA) {
typename TensorCoord::Index residual_size =
extent_.column() % Shape::kColumn;
if(residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(0, residual_size);
}
}
else {
typename TensorCoord::Index residual_size =
extent_.row() % Shape::kRow;
if(residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator(
TensorRef const &ref,
int lane_id
): MmaTensorOpMultiplicandTileAccessIterator(ref,
{Shape::kRow, Shape::kColumn}, lane_id) {
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
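/// When residual support is enabled and the extent is not a multiple of the tile shape,
/// the first advance steps by the residual (partial) offset so that all subsequent
/// advances operate on whole tiles.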
CUTLASS_DEVICE
void advance() {
if(EnableResidual && is_residual_) {
is_residual_ = false;
origin_ += residual_offset_;
ref_.add_coord_offset(residual_offset_);
}
else {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
}
iterations_ = 0;
}
/// increase iterations in a tile
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator & operator++() {
iterations_++;
if(iterations_ >= kIterations)
advance();
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
int const kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow);
// Take advantage of Tensor Op's 8 x 4T access pattern
int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
if (kOperand == Operand::kA) {
int const kTilesPerInstruction = InstructionShape::kRow / 8;
CUTLASS_PRAGMA_UNROLL
for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
int access_idx =
access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kColumn);
MatrixCoord access_coord = origin_ + offset;
// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
// }
// else {
// AccessType zero;
// zero.clear();
// access_ptr[access_idx] = zero;
// }
}
}
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kRow,
inst_n_idx * 8);
MatrixCoord access_coord = origin_ + offset;
// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
// }
// else {
// AccessType zero;
// zero.clear();
// access_ptr[access_idx] = zero;
// }
}
}
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h",
"repo_id": "include",
"token_count": 4051
} | 45 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic thread level reduction with specializations for Array<T, N>.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
namespace cutlass {
namespace reduction {
namespace thread {
/// Structure to compute the thread level reduction
template <typename Op, typename T>
struct Reduce;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization of Reduce for "plus" (a functional operator)
template <typename T>
struct Reduce< plus<T>, T > {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
plus<T> _op;
return _op(lhs, rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization of Reduce for Array<T, N>
template <typename T, int N>
struct Reduce < plus<T>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, 1> operator()(Array<T, N> const &in) const {
Array<T, 1> result;
Reduce< plus<T>, T > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], in[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for Array<half_t, N>
template <int N>
struct Reduce < plus<half_t>, Array<half_t, N> > {
CUTLASS_HOST_DEVICE
Array<half_t, 1> operator()(Array<half_t, N> const &input) {
Array<half_t, 1> result;
// If there is only 1 element - there is nothing to reduce
if( N ==1 ){
result[0] = input.front();
} else {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
__half result_d;
Array<half_t, 1> const *in_ptr_half = reinterpret_cast<Array<half_t, 1> const *>(&input);
Array<half_t, 2> const *in_ptr_half2 = reinterpret_cast<Array<half_t, 2> const *>(&input);
__half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);
// Set initial result = first half2, in case N==2
__half2 tmp_result = x_in_half2[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < N/2; ++i) {
tmp_result = __hadd2(x_in_half2[i], tmp_result);
}
result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));
// One final step is needed for odd "N" (to add the (N-1)th element)
if( N%2 ){
__half last_element;
Array<half_t, 1> tmp_last;
Array<half_t, 1> *tmp_last_ptr = &tmp_last;
tmp_last_ptr[0] = in_ptr_half[N-1];
last_element = reinterpret_cast<__half const &>(tmp_last);
result_d = __hadd(result_d, last_element);
}
Array<half_t, 1> *result_ptr = &result;
*result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);
#else
Reduce< plus<half_t>, half_t > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], input[i]);
}
#endif
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for AlignedArray<half_t, N>
template <int N>
struct Reduce < plus<half_t>, AlignedArray<half_t, N> > {
CUTLASS_HOST_DEVICE
Array<half_t, 1> operator()(AlignedArray<half_t, N> const &input) {
Array<half_t, 1> result;
// If there is only 1 element - there is nothing to reduce
if( N ==1 ){
result[0] = input.front();
} else {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
__half result_d;
AlignedArray<half_t, 1> const *in_ptr_half = reinterpret_cast<AlignedArray<half_t, 1> const *>(&input);
AlignedArray<half_t, 2> const *in_ptr_half2 = reinterpret_cast<AlignedArray<half_t, 2> const *>(&input);
__half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);
// Set initial result = first half2, in case N==2
__half2 tmp_result = x_in_half2[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < N/2; ++i) {
tmp_result = __hadd2(x_in_half2[i], tmp_result);
}
result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));
// One final step is needed for odd "N" (to add the (N-1)th element)
if( N%2 ){
__half last_element;
AlignedArray<half_t, 1> tmp_last;
AlignedArray<half_t, 1> *tmp_last_ptr = &tmp_last;
tmp_last_ptr[0] = in_ptr_half[N-1];
last_element = reinterpret_cast<__half const &>(tmp_last);
result_d = __hadd(result_d, last_element);
}
Array<half_t, 1> *result_ptr = &result;
*result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);
#else
Reduce< plus<half_t>, half_t > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], input[i]);
}
#endif
}
return result;
}
};
}
}
}
| include/cutlass/reduction/thread/reduce.h/0 | {
"file_path": "include/cutlass/reduction/thread/reduce.h",
"repo_id": "include",
"token_count": 2815
} | 46 |
[README](../../README.md#documentation) > **CUTLASS 3.0: Building on Windows with Visual Studio**
# Building on Windows with Visual Studio
CUTLASS 3.2 reintroduces support for the Microsoft Visual Studio compiler on Windows.
Users and developers may build either
in Visual Studio's graphical integrated development environment,
or on the command line with `cmake --build`.
# Software prerequisites
1. Windows 10 or 11
2. Visual Studio 2019 version 16.11.27, or Visual Studio 2022
3. CUDA Toolkit (at least 12.2; earlier 12.x versions may work)
4. CMake (at least 3.18)
5. git
6. Python (at least 3.6)
Visual Studio must be installed *before* the CUDA Toolkit.
Otherwise, Visual Studio's build system won't know about CUDA.
# Operating system settings
By default, Windows restricts the maximum file path length (`MAX_PATH`) to 260 characters.
CUTLASS has many files and directory paths that challenge this requirement.
As a result, CUTLASS is unlikely to build with this default setting.
The choice of source and build directories affects path lengths,
so the kinds of errors encountered, and whether they occur at all, may depend on this choice.
Symptoms may vary, from errors when running `cmake`
(e.g., during the "generating library instances" step) to build failures.
CUTLASS recommends changing the maximum file path length setting
and rebooting the computer before attempting to clone or build CUTLASS.
Windows 10 (as of version 1607) and 11 permit changing this setting
by making sure that the following registry key exists,
and that its value is set to 1.
```
Computer\HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem\LongPathsEnabled
```
After changing the registry key's value, reboot the computer first
before attempting to clone or build CUTLASS.
[This Microsoft help article](https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry)
explains different ways to change the registry setting.
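For example, the registry value can be set from an elevated (Administrator) PowerShell prompt with the command below. This is only one way to apply the registry change described above; the `regedit` and group policy approaches in the linked article work equally well.
```powershell
Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem' `
  -Name 'LongPathsEnabled' -Value 1 -Type DWord
```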
# Limitations
Currently, it's possible to build examples and tests.
Building the CUTLASS library (e.g., for profiling) with default settings does not work,
because Visual Studio's linker cannot handle more than 65535 symbols in a library.
(The symptom of this issue is a LNK1189 linker error.)
The known way to work around this Visual Studio limitation is to disable building CUTLASS's library,
by setting the CMake option `CUTLASS_ENABLE_LIBRARY` to `OFF`.
Another approach may be to limit the number of kernels in the library
by setting the CMake option `CUTLASS_LIBRARY_KERNELS`
so that CUTLASS tries to put fewer kernels in the library.
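For illustration, a reduced library build might be configured as follows. The kernel filter shown is only a hypothetical example pattern; substitute the kernel names of interest.
```
cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*gemm_f16_*
```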
# Set up build environment
1. Run "git bash" to get a familiar command-line interface
2. Edit `~/.profile` and set the environment variables as needed to access the CUTLASS repository
3. Clone the CUTLASS repository
4. Create the `build` subdirectory in the CUTLASS clone directory, and run CMake in it,
specifying whatever CMake options are desired, e.g.,
`cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_ENABLE_LIBRARY=OFF`
Alternate approaches may rely on the CMake GUI and/or Windows' native command line.
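As a concrete sketch of steps 3 and 4 from a git bash prompt (the architecture and CMake options shown are just one possible configuration):
```
git clone https://github.com/NVIDIA/cutlass.git
cd cutlass
mkdir build && cd build
cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_ENABLE_LIBRARY=OFF
```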
# Building
A successful CMake run will create a `CUTLASS.sln` Visual Studio "solution" file in the build directory.
One can open this in Visual Studio and build the entire solution or any subset of projects as desired.
It may be necessary to limit maximum build parallelism by setting the appropriate Visual Studio option.
Alternately, one can run `cmake --build . --config Release -j 4` in the build directory.
Replace 4 with the desired maximum build parallelism.
It's important to put the `--build` option before the period that signifies the build directory.
The `--config` option specifies the kind of build;
`--config Release` builds a Release build, while `--config Debug` builds a Debug build.
Unlike with CMake's Makefile or Ninja generators,
`CMAKE_BUILD_TYPE` has no effect on the Visual Studio generator,
because the Visual Studio generator creates all build configurations.
| media/docs/build/building_in_windows_with_visual_studio.md/0 | {
"file_path": "media/docs/build/building_in_windows_with_visual_studio.md",
"repo_id": "media",
"token_count": 1063
} | 47 |
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Functionality")
[README](../../README.md#documentation) > **Functionality**
# Functionality
Note: CUTLASS 3 requires CUDA 11.4 or newer for the target toolkit and SM70 or newer for the target architecture.
Please refer to the [Compatibility](/README.md#Compatibility) section for more details.
- N - Column Major Matrix
- T - Row Major Matrix
- {N,T} x {N,T} - All combinations, i.e., NN, NT, TN, TT
- [NHWC](/include/cutlass/layout/tensor.h#L63-206) - 4-dimensional tensor used for convolution
- [NCxHWx](/include/cutlass/layout/tensor.h#L290-395) - Interleaved 4-dimensional tensor used for convolution
- f - floating point
- s - signed int
- b - bit
- cf - complex float
- bf16 - bfloat16
- tf32 - tfloat32
- Simt - Use Simt CUDA Core MMA
- TensorOp - Use Tensor Core MMA
- SpTensorOp - Use Sparse Tensor Core MMA
- WmmaTensorOp - Use WMMA abstraction to use Tensor Core MMA
## Device-level GEMM
The following tables summarize device-level GEMM kernels in CUTLASS, organized by opcode class, data type, and layout.
Hyperlinks to relevant unit tests demonstrate how specific template instances may be defined.
### CUTLASS 3.x Kernels
|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** |
|-----------------|------------------------|------------------|--------------------------------|------------------------|------------------|
| **TensorOp** | 90a | 12.0+ | `f16 * f16 + { f16, f32 } => { f16, f32 }` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized.cu) |
| **TensorOp** | 90a | 12.0+ | `bf16 * bf16 + { f16, f32 } => { bf16, f32 }`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_bf16_bf16_bf16_tensor_op_f32.cu) |
| **TensorOp** | 90a | 12.0+ | `{f32, tf32} * {f32, tf32} + f32 => f32`| { T } x { N } => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_f32_f32_f32_tensor_op_f32.cu) |
| **TensorOp** | 90a | 12.0+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/sm90_gemm_s8_s8_s8_tensor_op_s32.cu) |
### CUTLASS 2.x Kernels
|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** |
|-----------------|------------------------|------------------|--------------------------------|------------------------|------------------|
| **Simt** | 50+ | 11.4+ | `f32 * f32 + f32 => f32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_sgemm_nt_sm50.cu) |
| **Simt** | 50+ | 11.4+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_dgemm_nt_sm50.cu) |
| **Simt** | 60+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_hgemm_nt_sm50.cu) |
| **Simt** | 61+ | 11.4+ | `s8 * s8 + s32 => {s32,s8}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/simt_igemm_nt_sm50.cu) |
| **WmmaTensorOp** | 70+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f16_sm70.cu) |
| **WmmaTensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16t_f16t_f16n_wmma_tensor_op_f32_sm70.cu) |
| **WmmaTensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s8t_wmma_tensor_op_s32_sm72.cu) |
| **WmmaTensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s4t_wmma_tensor_op_s32_sm75.cu) |
| **WmmaTensorOp** | 75+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_b1t_wmma_tensor_op_s32_sm75.cu) |
| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f16_sm70.cu) |
| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_volta_tensor_op_f32_sm70.cu) |
| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm75.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f16 => f16` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f16_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16t_f16t_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `bf16 * bf16 + f32 => {bf16, f32}`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_bf16n_bf16t_bf16t_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32`| {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f32n_f32t_f32t_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32n_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32n_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `b1 ^ b1 + s32 => {s32, b1}` | { T } x { N } => {N,T} | [example](/test/unit/gemm/device/gemm_b1t_b1n_s32n_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_cf32n_cf32t_cf32t_tensor_op_tf32_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `cf64 * cf64 + cf64 => cf64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_sm80.cu), [Gaussian 3m](/test/unit/gemm/device/gemm_cf64n_cf64t_cf64t_tensor_op_f64_gaussian_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `bf16 * bf16 + f32 => {bf16, f32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f32n_f32n_f32t_tensor_op_f32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s8, s32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s8t_s8n_s32t_tensor_op_s32_sparse_sm80.cu) |
| **SpTensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s4, s32}` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_s4t_s4n_s32t_tensor_op_s32_sparse_sm80.cu) |
| **TensorOp** | 90+ | 11.8+ | `f64 * f64 + f64 => f64` | {N,T} x {N,T} => {N,T} | [example](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) |
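As a rough illustration of how one row of the table above maps onto a device-level template instance, the following sketch instantiates an SM80 Tensor Core GEMM with `half_t` operands and `float` accumulation, relying on CUTLASS's default tile sizes for the remaining template parameters. The linked unit tests show fully specified instantiations.
```c++
#include "cutlass/gemm/device/gemm.h"

using Gemm = cutlass::gemm::device::Gemm<
    cutlass::half_t, cutlass::layout::ColumnMajor,   // ElementA, LayoutA ("N")
    cutlass::half_t, cutlass::layout::RowMajor,      // ElementB, LayoutB ("T")
    cutlass::half_t, cutlass::layout::RowMajor,      // ElementC, LayoutC
    float,                                           // ElementAccumulator
    cutlass::arch::OpClassTensorOp,                  // Opcode class
    cutlass::arch::Sm80>;                            // Target architecture
```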
## Device-level Implicit GEMM convolution
The following table summarizes device-level implicit GEMM convolution kernels in CUTLASS, organized by opcode class, data type, and layout.
Hyperlinks to relevant conv2d fprop unit tests demonstrate how specific template instances may be defined.
One can find and/or create equivalent dgrad and wgrad convolutional operators.
|**Opcode Class** | **Compute Capability** | **CUDA Toolkit** | **Data Type** | **Layouts** | **Unit Test** |
|-----------------|------------------------|------------------|--------------------------------|------------------|------------------|
| **Simt** | 50+ | 11.4+ | `f32 * f32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm50.cu) |
| **Simt** | 50+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_cf32nhwc_cf32nhwc_cf32nhwc_simt_f32_sm50.cu) |
| **TensorOp** | 70+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm70.cu) |
| **TensorOp** | 75+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8nhwc_s8nhwc_s32nhwc_tensor_op_s32_sm75.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm75.cu) |
| **TensorOp** | 75+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm75.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4ncxhwx_s4cxrskx_s4ncxhwx_tensor_op_s32_sm75.cu) |
| **Simt** | 80+ | 11.4+ | `f32 * f32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu) |
| **Simt** | 80+ | 11.4+ | `cf32 * cf32 + cf32 => cf32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_cf32nhwc_cf32nhwc_cf32nhwc_simt_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f32 => {f16, f32}`| NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `f16 * f16 + f16 => f16` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `tf32 * tf32 + f32 => f32` | NHWC | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s8 * s8 + s32 => {s32, s8}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8nhwc_s8nhwc_s32nhwc_tensor_op_s32_sm80.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm80.cu) |
| **TensorOp** | 80+ | 11.4+ | `s4 * s4 + s32 => {s32, s4}` | NHWC, NCxHWx | [example](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4nhwc_s4nhwc_s32nhwc_tensor_op_s32_sm80.cu), [ncxhwx](/test/unit/conv/device/conv2d_fprop_implicit_gemm_s4ncxhwx_s4cxrskx_s4ncxhwx_tensor_op_s32_sm80.cu) |
## Warp-level Matrix Multiply with Tensor Cores
The following table summarizes supported warp level shapes for each TensorOp instruction.
|**Opcode Class** | **Instruction Shape** | **Warp Shapes** |
|-----------------|-----------------------|--------------------------------------------|
| **TensorOp** | 8-by-8-by-4 | 32x32x4, 32x64x4, 64x32x4, 64x64x4 |
| **TensorOp** | 16-by-8-by-8 | 32x32x8, 32x64x8, 64x32x8, 64x64x8 |
| **TensorOp** | 16-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16, 64x64x16 |
| **TensorOp** | 8-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16, 64x64x16 |
| **TensorOp** | 8-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 |
| **TensorOp** | 16-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 |
| **TensorOp** | 16-by-8-by-64 | 32x32x64, 32x64x64, 64x32x64, 64x64x64 |
| **TensorOp** | 8-by-8-by-128 | 32x32x128, 32x64x128, 64x32x128, 64x64x128 |
| **TensorOp** | 16-by-8-by-256 | 32x32x256, 32x64x256, 64x32x256, 64x64x256 |
| **SpTensorOp** | 16-by-8-by-16 | 64x64x16, 64x32x16, 32x64x16, 32x32x16 |
| **SpTensorOp** | 16-by-8-by-32 | 64x64x32, 64x32x32, 32x64x32, 32x32x32 |
| **SpTensorOp** | 16-by-8-by-64 | 64x64x64, 64x32x64, 32x64x64, 32x32x64 |
| **SpTensorOp** | 16-by-8-by-128 | 64x64x128, 64x32x128, 32x64x128, 32x32x128 |
TensorOp instructions depend on a permuted shared memory layout from which operands can be
loaded efficiently. The following tables summarize the destination shared memory layout that
can be targeted by matrix operands. It is assumed that each thread loads 128b vectors
from global memory with layout specified in the column "GMEM Layout."
**TensorOp 8-by-8-by-4.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|-----------------------------------------|
| **A** | `half_t` | `ColumnMajor` | `ColumnMajorVoltaTensorOpCongruous<16>` |
| **A** | `half_t` | `RowMajor` | `RowMajorVoltaTensorOpCrosswise<16>` |
| **B** | `half_t` | `ColumnMajor` | `ColumnMajorVoltaTensorOpCrosswise<16>` |
| **B** | `half_t` | `RowMajor` | `RowMajorVoltaTensorOpCongruous<16>` |
| **C** | `half_t` | `RowMajor` | `RowMajor` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-8.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<16>` |
| **A** | `half_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16>` |
| **B** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16>` |
| **B** | `half_t` | `RowMajor` | `RowMajorTensorOpCongruous<16>` |
| **C** | `half_t` | `RowMajor` | `RowMajor` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-8.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<32>` |
| **A** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCrosswise<32>` |
| **B** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<32>` |
| **B** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCongruous<32>` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-16.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `half_t`, `bfloat16_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<16>` |
| **A** | `half_t`, `bfloat16_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16>` |
| **B** | `half_t`, `bfloat16_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16>` |
| **B** | `half_t`, `bfloat16_t` | `RowMajor` | `RowMajorTensorOpCongruous<16>` |
| **C** | `half_t` | `RowMajor` | `RowMajor` |
| **C** | `float` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-4.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `double` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<64>` |
| **A** | `double` | `RowMajor` | `RowMajorTensorOpCrosswise<64>` |
| **B** | `double` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<64>` |
| **B** | `double` | `RowMajor` | `RowMajorTensorOpCongruous<64>` |
| **C** | `double` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-16.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8>` |
| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<8>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-32.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8>` |
| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<8>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-32.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` |
| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 16-by-8-by-64.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` |
| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**TensorOp 8-by-8-by-128.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `bin1_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4>` |
| **B** | `bin1_t` | `ColumnMajor` | `ColumnMajorTensorOpCongruous<4>` |
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-16.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `tfloat32_t` | `RowMajor` | `RowMajorTensorOpCrosswise<32, 32>` |
| **B** | `tfloat32_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<32, 32>`|
| **C** | `float` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-32.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|---------------------------------------|
| **A** | `half_t` | `RowMajor` | `RowMajorTensorOpCrosswise<16, 64>` |
| **B** | `half_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<16, 64>`|
| **C** | `float` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-64.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|---------------------------------------|
| **A** | `int8_t` | `RowMajor` | `RowMajorTensorOpCrosswise<8, 128>` |
| **B** | `int8_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<8, 128>`|
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
**SpTensorOp 16-by-8-by-128.**
|**Operand**|**Element** | **GMEM Layout** | **SMEM Layout** |
|-----------|--------------|-----------------|------------------------------------|
| **A** | `int4b_t` | `RowMajor` | `RowMajorTensorOpCrosswise<4, 256>` |
| **B** | `int4b_t` | `ColumnMajor` | `ColumnMajorTensorOpCrosswise<4, 256>`|
| **C** | `int32_t` | `RowMajor` | `RowMajor` |
## Warp-level Matrix Multiply with CUDA WMMA API
The following table summarizes supported warp level shapes for each WmmaTensorOp instruction.
|**Opcode Class** | **Instruction Shape** | **Warp Shapes** |
|---------------------|-----------------------|--------------------------------------------|
| **WmmaTensorOp** | 16-by-16-by-16 | 32x32x16, 32x64x16, 64x32x16 |
| **WmmaTensorOp** | 8-by-32-by-16 | 32x32x16, 32x64x16, 64x32x16 |
| **WmmaTensorOp** | 32-by-8-by-16 | 32x32x16, 32x64x16, 64x32x16 |
| **WmmaTensorOp** | 8-by-8-by-32 | 32x32x32, 32x64x32, 64x32x32, 64x64x32 |
| **WmmaTensorOp** | 8-by-8-by-128 | 32x32x128, 32x64x128, 64x32x128, 64x64x128 |
CUDA exposes warp-level matrix operations through the CUDA C++ WMMA API, which provides access to Tensor Cores via a set of functions and types in the `nvcuda::wmma` namespace. The functions and types in `nvcuda::wmma` provide target-independent APIs and implement architecture-specific tensor operations using TensorOp instructions underneath. CUTLASS exposes the WMMA API through WmmaTensorOp, which supports canonical shared memory layouts. The following table summarizes the destination shared memory layouts that can be targeted by matrix operands. The WMMA API expects that matrices in shared memory loaded by `nvcuda::wmma::load_matrix_sync()` satisfy 128-bit alignment.
**WmmaTensorOp (all matrix sizes and data types).**
|**Operand** | **GMEM Layout** | **SMEM Layout** |
|------------|----------------------------|------------------------------|
| **A** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` |
| **B** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` |
| **C** | `RowMajor`, `ColumnMajor` | `RowMajor`, `ColumnMajor` |
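The following is a minimal sketch of the WMMA API described above, independent of CUTLASS, in which one warp computes a single 16-by-16-by-16 product. Tiling over larger problems, alignment checks, and error handling are omitted.
```c++
#include <mma.h>
using namespace nvcuda;

// One warp computes D = A * B + C for a single 16x16x16 tile.
__global__ void wmma_16x16x16(half const *A, half const *B, float const *C, float *D) {
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> frag_A;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> frag_B;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> frag_C;

  wmma::load_matrix_sync(frag_A, A, 16);                       // leading dimension 16
  wmma::load_matrix_sync(frag_B, B, 16);
  wmma::load_matrix_sync(frag_C, C, 16, wmma::mem_row_major);

  wmma::mma_sync(frag_C, frag_A, frag_B, frag_C);

  wmma::store_matrix_sync(D, frag_C, 16, wmma::mem_row_major);
}
```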
# Copyright
Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| media/docs/functionality.md/0 | {
"file_path": "media/docs/functionality.md",
"repo_id": "media",
"token_count": 13595
} | 48 |
![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "Complete CUDA GEMM decomposition")
# Python packages associated with CUTLASS
This directory contains Python packages that are associated with CUTLASS:
* `cutlass`: the CUTLASS Python interface, which enables one to compile and run CUTLASS kernels from within Python
* `cutlass_library`: utilities used for enumerating and emitting C++ code for CUTLASS kernels
## CUTLASS Python Interface
The CUTLASS Python interface enables one to compile and run CUTLASS operations from within Python.
```python
import cutlass
import numpy as np
plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
A, B, C, D = [np.ones((1024, 1024), dtype=np.float16) for i in range(4)]
plan.run(A, B, C, D)
```
### Overview
The CUTLASS Python interface prioritizes ease of use.
It has the following features that support this goal.
* It presents high-level interfaces for operators that require only a few parameters.
* It selects sensible default configurations for an operator given the parameters that have been specified.
* It enumerates configurations for users that are known to work in a given setting.
* It favors emitting descriptive Python run-time exceptions instead of C++ compile-time errors, where possible.
* It simplifies exporting CUTLASS kernels to framework extensions (e.g., PyTorch CUDA extensions).
#### Non-goals
The CUTLASS Python interface does not intend to:
1. select optimal kernel configurations,
2. act as a fast container for CUTLASS kernels, or
3. act as a Python-to-CUDA-kernel just-in-time (JIT) compilation engine.
Regarding selection of optimal kernel configurations,
the interface favors ease-of-use over maximum configurability.
Thus, its default selections for operator parameters may
not achieve the highest possible performance in all scenarios. Users seeking maximum performance should either
* select parameters by profiling different combinations of them, or
* use a library such as [cuBLAS](https://developer.nvidia.com/cublas)
that contains heuristics for selecting kernels.
Regarding acting as a fast container for CUTLASS kernels:
the interface does not strive to minimize overhead in its Python functions surrounding the running of a kernel.
Those wishing to deploy a CUTLASS kernel should either
* use the C++ emitted by the Python interface directly, or
* use one of the CUTLASS emitters for automatically creating a framework extension for the kernel (e.g., a PyTorch CUDA extension).
Regarding acting as a Python-to-CUDA-kernel JIT compilation engine:
the interface enables use of CUTLASS in Python code.
It can be used by frameworks for JIT compiling
Python to CUDA kernels, but does not set out to be such a framework.
#### Comparison to PyCUTLASS
The CUTLASS Python interface builds atop CUTLASS's [PyCUTLASS](https://github.com/NVIDIA/cutlass/tree/v3.0.0/tools/library/scripts/pycutlass) library. PyCUTLASS enables
one to declare, compile, and run GEMMs, convolutions, and grouped GEMM operators with nearly the same configuration
space as CUTLASS's C++ interface. While this flexibility enables one to achieve similar levels of functionality
to that available in CUTLASS's C++ interface, it comes with the burden of needing to specify many configuration parameters
to operators -- similar to what one must do in specifying template parameters to operations in CUTLASS's C++ interface.
In contrast, the CUTLASS Python interface aims to provide a higher-level API for declaring, emitting, and compiling
kernels that does not require exhaustively defining template parameters.
### Current functionality
The CUTLASS Python interface currently supports the following operations:
* GEMMs
* GEMMs with fused elementwise epilogues (e.g., ReLU) (for pre-SM90 kernels)
* Stream K swizzling (for pre-SM90 kernels)
* Grouped GEMM (for pre-SM90 kernels)
### Getting started
We recommend using the CUTLASS Python interface via an [NGC PyTorch Docker container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch):
```bash
docker run --gpus all -it --rm nvcr.io/nvidia/pytorch:23.08-py3 -p 8888:8888
```
The CUTLASS Python interface has been tested with CUDA 11.8, 12.0, and 12.1 on Python 3.8 and 3.9.
#### Optional environment variables
Prior to installing the CUTLASS Python interface, one may optionally set the following environment variables:
* `CUTLASS_PATH`: the path to the cloned CUTLASS repository
* `CUDA_INSTALL_PATH`: the path to the installation of CUDA
If these environment variables are not set, the installation process will infer them to be the following:
* `CUTLASS_PATH`: either one directory level above the current directory (i.e., `$(pwd)/..`) if installed locally or in the `source` directory of the location in which `cutlass_library` was installed
* `CUDA_INSTALL_PATH`: the directory holding `/bin/nvcc` for the first version of `nvcc` on `$PATH` (i.e., `which nvcc | awk -F'/bin/nvcc' '{print $1}'`)
**NOTE:** The version of `cuda-python` installed must match the CUDA version in `CUDA_INSTALL_PATH`.
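For example (the paths below are placeholders; substitute your own locations):
```bash
export CUTLASS_PATH=/path/to/cutlass
export CUDA_INSTALL_PATH=/usr/local/cuda
```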
#### Installation
Stable releases of the CUTLASS Python interface are available via the `nvidia-cutlass` PyPI package. Any other packages with the name `cutlass` are not affiliated with NVIDIA CUTLASS.
```bash
pip install nvidia-cutlass
```
The CUTLASS Python interface can also be installed from source by navigating to the root of the CUTLASS directory and performing
```bash
pip install .
```
If you would like to be able to make changes to the CUTLASS Python interface and have them reflected when using the interface, perform:
```bash
pip install -e .
```
To test that your installation was successful, you can run:
```python
import cutlass
import numpy as np
plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
A, B, C, D = [np.ones((128, 128), dtype=np.float16) for i in range(4)]
plan.run(A, B, C, D)
```
### Deep learning framework CUDA extensions
The CUTLASS Python interface provides utilities for exporting a CUTLASS kernel to a deep learning framework CUDA extensions. Currently, PyTorch CUDA extensions can be exported, but a similar pattern could be applied for other frameworks as well. An example of this is provided [here](/examples/python/02_pytorch_extension_grouped_gemm.ipynb).
Currently, the following operations can be exported to a PyTorch CUDA extension:
* GEMM
* Grouped GEMM
* Conv2d
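The pattern roughly follows the linked notebook: declare a plan and pass it to the emitter. The `cutlass.emit.pytorch` call and its arguments below are taken from that example and may differ across versions; treat the module name as a placeholder.
```python
import cutlass

plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)

# Emit a PyTorch CUDA extension wrapping the kernel and JIT-compile it
mod = cutlass.emit.pytorch(plan, name="cutlass_gemm", cc=plan.cc, jit=True)
```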
### Examples
Jupyter notebook examples of using the CUTLASS Python interface are located in [examples/python](/examples/python).
To launch these notebooks from this directory, run:
```bash
jupyter-lab ../examples/python
```
### Building documentation
The CUTLASS Python interface uses [Sphinx](https://www.sphinx-doc.org/en/master/) for documentation.
Building the documentation requires additional packages. The following commands will install them.
```bash
sudo apt-get install pandoc
pip install --upgrade Sphinx furo pandoc myst-parser sphinx-copybutton nbsphinx nbsphinx-link sphinx-inline-tabs
```
To build documentation, you must first have installed the CUTLASS Python interface via the
[installation instructions](#installation).
Documentation can then be built via the following commands.
```bash
sphinx-apidoc -o docs_src/source/ cutlass/ cutlass/backend*
cd docs_src
make html
mv _build/* ../docs
```
## CUTLASS library package
[cutlass_library](/python/cutlass_library) contains utilities for enumerating and emitting CUTLASS C++ kernels.
It is used by the CUTLASS CMake system to construct a library of kernels that can be profiled using the CUTLASS profiler.
To install the `cutlass_library` package, run
```bash
python setup_library.py develop --user
```
Alternatively, `cutlass_library` will automatically be installed if you install the CUTLASS Python interface package.
You can also use the [generator.py](/python/cutlass_library/generator.py) script directly without installing the module.
# Copyright
Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| python/README.md/0 | {
"file_path": "python/README.md",
"repo_id": "python",
"token_count": 2666
} | 49 |
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 4a5275a3b68094ba1d8a4b7e4c459321
tags: 645f666f9bcd5a90fca523b33c5a78b7
| python/docs/.buildinfo/0 | {
"file_path": "python/docs/.buildinfo",
"repo_id": "python",
"token_count": 85
} | 50 |
/*
* doctools.js
* ~~~~~~~~~~~
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
* :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
"TEXTAREA",
"INPUT",
"SELECT",
"BUTTON",
]);
const _ready = (callback) => {
if (document.readyState !== "loading") {
callback();
} else {
document.addEventListener("DOMContentLoaded", callback);
}
};
/**
* Small JavaScript module for the documentation.
*/
const Documentation = {
init: () => {
Documentation.initDomainIndexTable();
Documentation.initOnKeyListeners();
},
/**
* i18n support
*/
TRANSLATIONS: {},
PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
LOCALE: "unknown",
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
gettext: (string) => {
const translated = Documentation.TRANSLATIONS[string];
switch (typeof translated) {
case "undefined":
return string; // no translation
case "string":
return translated; // translation exists
default:
return translated[0]; // (singular, plural) translation tuple exists
}
},
ngettext: (singular, plural, n) => {
const translated = Documentation.TRANSLATIONS[singular];
if (typeof translated !== "undefined")
return translated[Documentation.PLURAL_EXPR(n)];
return n === 1 ? singular : plural;
},
addTranslations: (catalog) => {
Object.assign(Documentation.TRANSLATIONS, catalog.messages);
Documentation.PLURAL_EXPR = new Function(
"n",
`return (${catalog.plural_expr})`
);
Documentation.LOCALE = catalog.locale;
},
/**
* helper function to focus on search bar
*/
focusSearchBar: () => {
document.querySelectorAll("input[name=q]")[0]?.focus();
},
/**
* Initialise the domain index toggle buttons
*/
initDomainIndexTable: () => {
const toggler = (el) => {
const idNumber = el.id.substr(7);
const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
if (el.src.substr(-9) === "minus.png") {
el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
toggledRows.forEach((el) => (el.style.display = "none"));
} else {
el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
toggledRows.forEach((el) => (el.style.display = ""));
}
};
const togglerElements = document.querySelectorAll("img.toggler");
togglerElements.forEach((el) =>
el.addEventListener("click", (event) => toggler(event.currentTarget))
);
togglerElements.forEach((el) => (el.style.display = ""));
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
},
initOnKeyListeners: () => {
// only install a listener if it is really needed
if (
!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
)
return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.altKey || event.ctrlKey || event.metaKey) return;
if (!event.shiftKey) {
switch (event.key) {
case "ArrowLeft":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const prevLink = document.querySelector('link[rel="prev"]');
if (prevLink && prevLink.href) {
window.location.href = prevLink.href;
event.preventDefault();
}
break;
case "ArrowRight":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const nextLink = document.querySelector('link[rel="next"]');
if (nextLink && nextLink.href) {
window.location.href = nextLink.href;
event.preventDefault();
}
break;
}
}
// some keyboard layouts may need Shift to get /
switch (event.key) {
case "/":
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
Documentation.focusSearchBar();
event.preventDefault();
}
});
},
};
// quick alias for translations
const _ = Documentation.gettext;
_ready(Documentation.init);
| python/docs/_static/doctools.js/0 | {
"file_path": "python/docs/_static/doctools.js",
"repo_id": "python",
"token_count": 1836
} | 51 |
CUTLASS Python API
==================
.. toctree::
:maxdepth: 5
cutlass
| python/docs_src/source/modules.rst/0 | {
"file_path": "python/docs_src/source/modules.rst",
"repo_id": "python",
"token_count": 30
} | 52 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit test for compute node in SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from cutlass import swizzle
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTCompute(EVTTestCaseBase):
def test_arith(self):
"""
        Test arithmetic op
"""
def evt_arith_compute(accum, C, alpha, beta, gamma):
D = ((accum + C) * alpha - gamma) / beta
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.5,
"beta": 0.5,
"gamma": 2.5,
"D": self.fake_tensor(self.element, (l, m, n))
}
launcher = EVTTestBed(self.element, evt_arith_compute, example_inputs)
input_keys = ["C", "alpha", "beta", "gamma"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_func_call(self):
"""
Test Function call
"""
def evt_func_call(accum, C, alpha, beta, gamma):
D = multiply_add(relu(accum + alpha) + C, beta, gamma)
return D
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"C": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.5,
"beta": 0.5,
"gamma": 2.5,
"D": self.fake_tensor(self.element, (l, m, n))
}
launcher = EVTTestBed(self.element, evt_func_call, example_inputs)
input_keys = ["C", "alpha", "beta", "gamma"]
result_keys = ["D"]
launcher.verify((m, n, k), input_keys, result_keys, l)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_compute_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_compute_sm80_90.py",
"repo_id": "test",
"token_count": 1571
} | 53 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with S8 operands on SM90
"""
from functools import partial
import logging
import unittest
import cutlass
from cutlass.backend.utils.device import device_cc
from utils import LayoutCombination, add_test_gemm
cutlass.set_log_level(logging.WARNING)
cc = 90
dtype = cutlass.DataType.s8
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
@unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}')
class GemmS8Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, cls=GemmS8Sm90, element=dtype, compilation_modes=['nvcc'])
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
# Tests with 1x1x1 clusters
add_test_tensorop(layouts=LayoutCombination.TNN, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=3)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 8], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[64, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 64, 32], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[ 4, 4, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
# Tests with different cluster shapes
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[2, 2, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 4, 1], threadblock_shape=[128, 128, 128], stages=None)
# Tests with warp-specialized ping-pong schedule
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[2, 1, 1], threadblock_shape=[128, 128, 128], stages=None,
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized)
# Tests for SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[64, 32, 8], stages=2)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/gemm/gemm_s8_sm90.py/0 | {
"file_path": "test/python/cutlass/gemm/gemm_s8_sm90.py",
"repo_id": "test",
"token_count": 1933
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implicit GEMM testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "conv2d_problems.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/util/tensor_view_io.h"
#include "../cache_testbed_output.h"
namespace test {
namespace conv {
namespace device {
template <typename Conv2d>
class TestbedConv2d {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
/// Reduction kernel
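  /// (used when split_k_mode == kParallel to reduce partial accumulations from the global-memory workspace)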
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
typename EpilogueOutputOp::ElementAccumulator,
EpilogueOutputOp::kCount
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
using ReductionStrideIndex = typename ReductionDevice::StrideIndex;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
int tested_problem_count;
public:
TestbedConv2d(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_), tested_problem_count(0) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
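      // Choose the magnitude of random values from the element width; narrower ranges for
      // low-precision types presumably keep accumulation exact so reference comparisons stay reliable.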
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
}
else if (bits == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope = 3;
}
else {
scope = 5;
}
}
else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope, -scope, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
}
else {
}
}
void initialize(
cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_D_reference.sync_device();
}
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
    // increment the count of problems run by the testbed
tested_problem_count++;
#if 0 // display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_computed.device_ref(),
{alpha, beta},
split_k_mode
);
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
// conv2d operation with parallel split-k-mode
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// conv2d output is written to workspace in global memory
conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get()));
// accumulate mma for each cta in k-dimension (1.0 * A * B)
conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)};
// update conv2d operator arguments
status = conv2d_op.update(conv2d_args, workspace.get());
}
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run." << std::endl;
return false;
}
if (split_k_mode == cutlass::conv::SplitKMode::kParallel) {
// configure parallel reduction operator
ReductionDevice reduction_op;
typename ReductionDevice::Arguments reduction_args(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(),
problem_size.split_k_slices,
cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size),
{
reinterpret_cast<ElementAccumulator*> (workspace.get()),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
{
tensor_D_computed.device_data(),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
{
tensor_C.device_data(),
ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx])
},
      // apply alpha and beta to compute: alpha * ReduceAdd(A * B) + beta * C
{alpha, beta}
);
status = reduction_op.initialize(reduction_args, nullptr);
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
      // run parallel reduction kernel
status = reduction_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: "
<< cudaGetErrorString(result);
tensor_D_computed.sync_host();
//
// Reference check - support caching results
//
CachedTestKey cached_test_key = CreateCachedConv2dTestKey<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
ElementCompute
>(
kConvolutionalOperator,
problem_size,
alpha,
beta,
tensor_A.host_view(),
tensor_B.host_view(),
tensor_C.host_view()
);
//
// Look for the cached key
//
bool cached_result_loaded = false;
CachedTestResult cached_test_result;
std::string conv2d_result_cache_name =
std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
CachedTestResultListing cached_results(conv2d_result_cache_name);
auto cached = cached_results.find(cached_test_key);
cached_result_loaded = cached.first;
if (cached_result_loaded) {
cached_test_result = cached.second;
}
}
if (!cached_result_loaded) {
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_reference.device_ref(),
alpha,
beta);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D_reference.sync_host();
#else
cutlass::reference::host::Conv2d<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tensor_D_reference.host_ref(),
alpha,
beta);
#endif
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
cached_test_result.D = TensorHash(tensor_D_reference.host_view());
CachedTestResultListing cached_results(conv2d_result_cache_name);
cached_results.append(cached_test_key, cached_test_result);
cached_results.write(conv2d_result_cache_name);
}
} // if (!cached_result_loaded)
uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view());
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
passed = (tensor_D_hash == cached_test_result.D);
EXPECT_EQ(tensor_D_hash, cached_test_result.D)
<< "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n";
}
else {
passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view());
}
EXPECT_TRUE(passed);
std::stringstream ss_problem_size_text;
ss_problem_size_text << "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_");
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_ImplicitGemm_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDeconv ? "deconv_" : "wgrad_")))
<< ss_problem_size_text.str()
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n";
results << "\nD reference (hash: " << cached_test_result.D << ")\n";
if (!cached_result_loaded) {
results
<< tensor_D_reference.host_view() << "\n";
}
results
<< "\nD computed (hash: " << tensor_D_hash << ")\n"
<< tensor_D_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
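// TestSpecificConv2d: runs the given ImplicitGemm operator over a user-provided list of problem sizes,
// exercising both cross-correlation and convolution modes with serial split-k reduction.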
template <typename ImplicitGemm>
bool TestSpecificConv2d(
const Conv2dProblemVector & problem_sizes) {
bool passed = true;
//
// Testbed object
//
TestbedConv2d<ImplicitGemm> testbed;
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for(auto conv_problem : problem_sizes) {
//
// Test
//
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// TestAllConv2d: Runs the cutlass::conv::device::ImplicitGemmConvolution operator and compares it against a
// reference implementation. TestAllConv2d runs the conv operator on the default conv problem sizes from
// test::conv::device::TestbedConv2dProblemSizes. Additionally, each conv2d test can provide its own problem
// sizes (conv_test_sizes) and a blacklist of sizes to skip (conv_blacklist_sizes)
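//
// A minimal usage sketch (the kernel alias below is hypothetical; any
// cutlass::conv::device::ImplicitGemmConvolution instantiation may be substituted):
//
//   using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
//   EXPECT_TRUE(test::conv::device::TestAllConv2d<ImplicitGemm>());
//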
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ImplicitGemm>
bool TestAllConv2d(
const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(),
const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) {
bool passed = true;
//
// Testbed object
//
TestbedConv2d<ImplicitGemm> testbed;
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
// Vectors of Conv2dProblemVector (lenient/easiest to rigorous problem sizes)
std::vector<Conv2dProblemVector> problem_vectors = {
conv_test_sizes, // run user specified sizes
conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
//conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
// Flatten 2D problem_vectors into a 1D problem_sizes
std::vector<cutlass::conv::Conv2dProblemSize> problem_sizes;
for (auto problem_vector : problem_vectors) {
for(auto conv_problem : problem_vector) {
problem_sizes.push_back(conv_problem);
}
}
  // If CUTLASS_UNIT_TEST_PROBLEM_COUNT is set, reverse the order (rigorous to lenient)
  // so that the most rigorous problem sizes run first
if (CutlassUnitTestProblemCount()) {
std::reverse(problem_sizes.begin(), problem_sizes.end());
}
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for(auto conv_problem : problem_sizes) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Procedurally disable certain cases
//
    // CUTLASS DGRAD's *unity* stride specialization only supports stride {1, 1}
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kUnity)) {
if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
// Fixed channels algorithm requires channel count to match access size
if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
cutlass::conv::IteratorAlgorithm::kFixedChannels) {
if (conv_problem.C != ImplicitGemm::UnderlyingKernel::Mma::IteratorA::AccessType::kElements) {
continue;
}
}
    // Few channels algorithm requires the channel count to be a multiple of the access size
if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
cutlass::conv::IteratorAlgorithm::kFewChannels) {
if (conv_problem.C % ImplicitGemm::UnderlyingKernel::Mma::IteratorA::AccessType::kElements) {
continue;
}
}
// CUTLASS DGRAD's *strided* stride specialization supports all stride {stride_h, stride_w}
// Although strided dgrad works for all stride combinations, we are only going
// to run strided dgrad for non-unity strides
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) {
continue;
}
}
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
    // If CUTLASS_UNIT_TEST_PROBLEM_COUNT is set, limit the number of problems tested
if (CutlassUnitTestProblemCount() &&
testbed.tested_problem_count > CutlassUnitTestProblemCount()) {
return true;
}
}
// Small-channels convolution can't run here.
if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
cutlass::conv::IteratorAlgorithm::kFixedChannels) {
return true;
}
// Small-channels convolution can't run here.
if (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kIteratorAlgorithm ==
cutlass::conv::IteratorAlgorithm::kFewChannels) {
return true;
}
// CUTLASS DGRAD's *strided* specialization does not support split-k mode
if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ||
ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) &&
(ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport ==
cutlass::conv::StrideSupport::kStrided)) {
passed = testbed.run(
cutlass::conv::Conv2dProblemSize(
{1, 56, 56, 8}, // input size (NHWC)
{8, 1, 1, 8}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{2, 2}, // stride (stride_h, stride_w)
{1, 1}), // dilation (dilation_h, dilation_w)
cutlass::conv::SplitKMode::kSerial,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0));
if (!passed) {
return false;
}
return passed;
}
  // Sweep split-k-slices using serial and parallel reduction with non-unity alpha and non-zero beta for
  // a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
  // which are absolutely necessary to catch functional bugs. The code below provides the option to sweep
  // alpha and beta for local testing, but by default runs only one value for each.
cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size (
{1, 17, 11, 288}, // input size (NHWC)
{160, 3, 3, 288}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1} // dilation (dilation_h, dilation_w)
);
cutlass::conv::SplitKMode split_k_modes [] = {
cutlass::conv::SplitKMode::kSerial,
cutlass::conv::SplitKMode::kParallel,
};
int split_k_slices[] = {
1, 2, 3, 4, 201
};
double problem_alpha[] = {
2.0
};
double problem_beta[] = {
2.0
};
for (auto split_k_mode : split_k_modes) {
for (auto split_k_slice : split_k_slices) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha),
cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta));
if (!passed) {
return false;
}
          // If CUTLASS_UNIT_TEST_PROBLEM_COUNT is set, limit the number of problems tested
if (CutlassUnitTestProblemCount() &&
testbed.tested_problem_count > CutlassUnitTestProblemCount()) {
return true;
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device/conv2d_testbed.h/0 | {
"file_path": "test/unit/conv/device/conv2d_testbed.h",
"repo_id": "test",
"token_count": 10555
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Depthwise Direct Conv testbed
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "../cache_testbed_output.h"
#include "conv2d_problems.h"
#include "cutlass/conv/device/direct_convolution.h"
#include "cutlass/core_io.h"
#include "cutlass/cutlass.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
namespace test {
namespace conv {
namespace device {
template <typename Conv2d>
class TestbedDepthwiseDirectConv2d {
public:
using ElementA = typename Conv2d::ElementA;
using LayoutA = typename Conv2d::LayoutA;
using ElementB = typename Conv2d::ElementB;
using LayoutB = typename Conv2d::LayoutB;
using ElementC = typename Conv2d::ElementC;
using LayoutC = typename Conv2d::LayoutC;
using ElementAccumulator = typename Conv2d::ElementAccumulator;
using ElementCompute = typename Conv2d::ElementCompute;
using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp;
static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator;
public:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementB, LayoutB> tensor_reordered_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
int tested_problem_count;
public:
TestbedDepthwiseDirectConv2d(cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080)
: init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_), tested_problem_count(0) {}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor(cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope;
int bits = cutlass::sizeof_bits<Element>::value;
if (bits <= 8) {
scope = 2;
} else if (bits == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope = 3;
} else {
scope = 5;
}
} else {
scope = 8;
}
cutlass::reference::host::TensorFillRandomUniform(view, seed, scope, -scope, 0);
} else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
} else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
} else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity());
} else {
}
}
void initialize(cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) {
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_reordered_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
initialize_tensor(tensor_A.host_view(), init_A, seed);
initialize_tensor(tensor_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_reordered_B.host_view(), init_B, seed * 17);
initialize_tensor(tensor_C.host_view(), init_C, seed * 39);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_reordered_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
tensor_D_reference.sync_device();
}
bool sufficient(int smem_size) const {
//
// Determine SMEM requirements and waive if not satisfied
//
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < static_cast<size_t>(smem_size)) {
return false;
}
return true;
}
/// Executes one test
bool run(cutlass::conv::Conv2dProblemSize const &problem_size,
cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial,
ElementCompute alpha = ElementCompute(1.5),
ElementCompute beta = ElementCompute(1)) {
    // increment the count of problems run by the testbed
tested_problem_count++;
#if 0 // display conv2d problem size for debugging
std::cout << problem_size << std::endl
<< "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl
<< "split_k_mode: "
<< ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)")
<< std::endl
<< std::endl;
#endif
initialize(problem_size);
// configure the operator
Conv2d conv2d_op;
typename Conv2d::Arguments conv2d_args(problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_computed.device_ref(),
{alpha, beta},
tensor_reordered_B.device_ref(),
split_k_mode);
// find workspace requirement for parallel split-k reduction
size_t workspace_size = Conv2d::get_workspace_size(conv2d_args);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = conv2d_op.can_implement(problem_size);
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
status = conv2d_op.initialize(conv2d_args, workspace.get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
if (!sufficient(conv2d_op.get_smem_size())) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
// run conv2d operator
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run." << std::endl;
return false;
}
bool passed = false;
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " device reference error: " << cudaGetErrorString(result);
tensor_D_computed.sync_host();
//
// Reference check - support caching results
//
CachedTestKey cached_test_key =
CreateCachedConv2dTestKey<ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementCompute>(kConvolutionalOperator,
problem_size,
alpha,
beta,
tensor_A.host_view(),
tensor_B.host_view(),
tensor_C.host_view());
//
// Look for the cached key
//
bool cached_result_loaded = false;
CachedTestResult cached_test_result;
std::string conv2d_result_cache_name =
std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt";
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
CachedTestResultListing cached_results(conv2d_result_cache_name);
auto cached = cached_results.find(cached_test_key);
cached_result_loaded = cached.first;
if (cached_result_loaded) {
cached_test_result = cached.second;
}
}
if (!cached_result_loaded) {
#if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED
cutlass::reference::device::Conv2d<ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator>(kConvolutionalOperator,
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D_reference.device_ref(),
alpha,
beta);
// sync host (copy device data to host) for dumping error output in case of mismatches
tensor_D_reference.sync_host();
#else
cutlass::reference::host::Conv2d<ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator>(kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tensor_D_reference.host_ref(),
alpha,
beta);
#endif
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
cached_test_result.D = TensorHash(tensor_D_reference.host_view());
CachedTestResultListing cached_results(conv2d_result_cache_name);
cached_results.append(cached_test_key, cached_test_result);
cached_results.write(conv2d_result_cache_name);
}
} // if (!cached_result_loaded)
uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view());
if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) {
passed = (tensor_D_hash == cached_test_result.D);
EXPECT_EQ(tensor_D_hash, cached_test_result.D)
<< "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n";
}
else {
passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view());
}
EXPECT_TRUE(passed);
std::stringstream ss_problem_size_text;
ss_problem_size_text << "nhwc_"
<< problem_size.N << "x"
<< problem_size.H << "x"
<< problem_size.W << "x"
<< problem_size.C
<< "_krsc_"
<< problem_size.K << "x"
<< problem_size.R << "x"
<< problem_size.S << "x"
<< problem_size.C
<< "_padding_"
<< problem_size.pad_h << "x"
<< problem_size.pad_w
<< "_stride_"
<< problem_size.stride_h << "x"
<< problem_size.stride_w
<< "_dilation_"
<< problem_size.dilation_h << "x"
<< problem_size.dilation_w << "_"
<< (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_");
if (!passed) {
std::stringstream fname;
fname << "error_Conv2d_DirectConv_device_"
<< (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_")
<< (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" :
(Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_"))
<< ss_problem_size_text.str()
<< Conv2d::ThreadblockShape::kM << "x"
<< Conv2d::ThreadblockShape::kN << "x"
<< Conv2d::ThreadblockShape::kK << "_"
<< Conv2d::WarpShape::kM << "x"
<< Conv2d::WarpShape::kN << "x"
<< Conv2d::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n";
results << "\nD reference (hash: " << cached_test_result.D << ")\n";
if (!cached_result_loaded) {
results
<< tensor_D_reference.host_view() << "\n";
}
results
<< "\nD computed (hash: " << tensor_D_hash << ")\n"
<< tensor_D_computed.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
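// TestSpecificDepthwiseDirectConv2d: runs the given depthwise direct convolution operator over a
// user-provided list of problem sizes, exercising both cross-correlation and convolution modes with
// serial split-k reduction.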
template <typename DirectConv>
bool TestSpecificDepthwiseDirectConv2d(const Conv2dProblemVector &problem_sizes) {
bool passed = true;
//
// Testbed object
//
TestbedDepthwiseDirectConv2d<DirectConv> testbed;
  // Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.5, beta=1.0)
for (auto conv_problem : problem_sizes) {
//
// Test
//
// test mode = xcross
passed = testbed.run(
conv_problem,
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
// test mode = convolution
passed = testbed.run(
conv_problem.reset_mode(cutlass::conv::Mode::kConvolution),
cutlass::conv::SplitKMode::kSerial);
if (!passed) {
return false;
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device/depthwise_conv2d_direct_conv_testbed.h/0 | {
"file_path": "test/unit/conv/device/depthwise_conv2d_direct_conv_testbed.h",
"repo_id": "test",
"token_count": 8266
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "../common/cutlass_unit_test.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, basic_rank2) {
int const M = 8;
int const N = 16;
int matrix_data[M * N] = {0};
cutlass::TensorRef<
int,
cutlass::IdentityTensorLayout<2> > matrix_ref(matrix_data, cutlass::make_Coord(N, 1));
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
matrix_ref.at(cutlass::make_Coord(m, n)) = m * N + n;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
EXPECT_EQ(matrix_data[m * N + n], int(m * N + n));
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_column_major) {
int const M = 8;
int const N = 8;
int matrix_data[M * N];
cutlass::TensorRef<int, cutlass::layout::ColumnMajor> ref(matrix_data, M);
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
ref.at(cutlass::make_Coord(m, n)) = m * N + n;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
EXPECT_EQ(matrix_data[m + n * M], int(m * N + n));
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_row_major) {
int const M = 8;
int const N = 16;
int matrix_data[M * N] = { 0 };
cutlass::TensorRef<int, cutlass::layout::RowMajor> ref(matrix_data, N);
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
ref.at(cutlass::make_Coord(m, n)) = m * N + n;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
EXPECT_EQ(matrix_data[m * N + n], int(m * N + n));
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_contiguous_dynamic) {
int const M = 8;
int const N = 16;
typedef cutlass::TensorRef<int, cutlass::layout::ContiguousMatrix> ContiguousTensorRef;
cutlass::layout::Matrix layouts[] = {
cutlass::layout::Matrix::kColumnMajor,
cutlass::layout::Matrix::kRowMajor
};
for (int i = 0; i < 2; ++i) {
int matrix_data[M * N] = { 0 };
int row_stride;
int col_stride;
if (layouts[i] == cutlass::layout::Matrix::kColumnMajor) {
row_stride = 1;
col_stride = M;
}
else {
row_stride = N;
col_stride = 1;
}
// Use helper to determine stride vector from leading dimension
ContiguousTensorRef ref(
matrix_data,
cutlass::layout::ContiguousMatrix::packed(cutlass::make_Coord(M, N), layouts[i]));
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
ref.at(cutlass::make_Coord(m, n)) = m * N + n;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
EXPECT_EQ(matrix_data[m * row_stride + n * col_stride], int(m * N + n));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_column_major_interleaved) {
int const M = 16;
int const N = 16;
int const kInterleave = 4;
int matrix_data[M * N] = {0};
// Define the Layout for a column-major interleaved matrix format
using Layout = cutlass::layout::ColumnMajorInterleaved<kInterleave>;
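  // For this layout, element (m, n) maps to linear offset
  //   (n / kInterleave) * (M * kInterleave) + m * kInterleave + (n % kInterleave),
  // which the verification loop below checks explicitly.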
// Construct a TensorRef
cutlass::TensorRef<
int,
Layout> ref(matrix_data, Layout::packed(cutlass::make_Coord(M, N)));
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
ref.at(cutlass::make_Coord(m, n)) = m + n * M;
}
}
// Verify
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; n += kInterleave) {
for (int i = 0; i < kInterleave; ++i) {
EXPECT_EQ(matrix_data[m * kInterleave + n * M + i], int(m + (n + i) * M));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorRef, rank2_row_major_interleaved) {
int const M = 16;
int const N = 16;
int const kInterleave = 4;
int matrix_data[M * N] = {0};
// Define the Layout for a row-major interleaved matrix format
using Layout = cutlass::layout::RowMajorInterleaved<kInterleave>;
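  // For this layout, element (m, n) maps to linear offset
  //   (m / kInterleave) * (N * kInterleave) + n * kInterleave + (m % kInterleave),
  // which the verification loop below checks explicitly.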
// Construct a TensorRef
cutlass::TensorRef<
int,
Layout> ref(matrix_data, Layout::packed(cutlass::make_Coord(M, N)));
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
ref.at(cutlass::make_Coord(m, n)) = m + n * M;
}
}
// Verify
for (int m = 0; m < M; m += kInterleave) {
for (int n = 0; n < N; ++n) {
for (int i = 0; i < kInterleave; ++i) {
EXPECT_EQ(matrix_data[m * N + i + n * kInterleave], int((m + i) + n * M));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/tensor_ref.cu/0 | {
"file_path": "test/unit/core/tensor_ref.cu",
"repo_id": "test",
"token_count": 2450
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
using namespace cute;
template <class Layout>
void
test_coalesce(Layout const& layout)
{
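  // coalesce() should flatten the layout to depth <= 1 while preserving both its total size
  // and its mapping from linear index to offset; the checks below verify exactly that.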
auto coalesce_layout = coalesce(layout);
CUTLASS_TRACE_HOST(shape (layout) << " => " << shape (coalesce_layout));
CUTLASS_TRACE_HOST(stride(layout) << " " << stride(coalesce_layout));
CUTE_STATIC_ASSERT_V(depth(coalesce_layout) <= Int<1>{});
ASSERT_EQ(size(coalesce_layout), size(layout));
for (int i = 0; i < size(layout); ++i) {
EXPECT_EQ(coalesce_layout(i), layout(i));
}
}
TEST(CuTe_core, Coalesce)
{
{
auto layout = make_layout(Int<1>{}, Int<0>{});
test_coalesce(layout);
}
{
auto layout = make_layout(Int<1>{}, Int<1>{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, Int<6>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
make_stride(Int<1>{}, Int<6>{}, Int<2>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
make_stride(Int<1>{}, 7, Int<2>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
make_stride(Int<4>{}, 7, Int<8>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(2, Int<4>{}, Int<6>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 4, Int<6>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, 6));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, Int<6>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(2, Int<4>{}, Int<6>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 4, Int<6>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, 6), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<1>{}, Int<3>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), make_stride(Int<2>{}, 4, Int<4>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), make_stride(Int<2>{}, Int<0>{}, Int<4>{}));
test_coalesce(layout);
}
{
auto layout = Layout<Shape<Shape<_2,_2>,Shape<_2, _2>>,
Stride<Stride<_1,_4>,Stride<_8,_32>>>{};
test_coalesce(layout);
}
}
| test/unit/cute/core/coalesce.cpp/0 | {
"file_path": "test/unit/cute/core/coalesce.cpp",
"repo_id": "test",
"token_count": 1874
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include "cutlass/trace.h"
#include "cute/algorithm/tuple_algorithms.hpp"
#include "cute/container/array.hpp"
#include "cute/container/tuple.hpp"
TEST(CuTe_core, Reverse_Tuple)
{
using cute::get;
{
const auto t = cute::make_tuple();
[[maybe_unused]] auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 0);
}
{
const auto t = cute::make_tuple(123);
[[maybe_unused]] auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 1);
EXPECT_EQ(get<0>(t_r), 123);
}
{
const auto t = cute::make_tuple(123, 456);
[[maybe_unused]] auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 2);
EXPECT_EQ(get<0>(t_r), 456);
EXPECT_EQ(get<1>(t_r), 123);
}
{
const auto t = cute::make_tuple(1, 2, 3, 4, 5);
auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 5);
EXPECT_EQ(get<0>(t_r), 5);
EXPECT_EQ(get<1>(t_r), 4);
EXPECT_EQ(get<2>(t_r), 3);
EXPECT_EQ(get<3>(t_r), 2);
EXPECT_EQ(get<4>(t_r), 1);
}
{
const auto t = cute::make_tuple(cute::Int<1>{}, cute::Int<2>{}, 3);
auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 3);
static_assert(cute::is_same_v<cute::remove_cvref_t<decltype(get<0>(t_r))>, int>);
static_assert(cute::is_same_v<cute::remove_cvref_t<decltype(get<1>(t_r))>, cute::Int<2>>);
static_assert(cute::is_same_v<cute::remove_cvref_t<decltype(get<2>(t_r))>, cute::Int<1>>);
EXPECT_EQ(get<0>(t_r), 3);
EXPECT_EQ(get<1>(t_r), cute::Int<2>{});
EXPECT_EQ(get<2>(t_r), cute::Int<1>{});
}
}
TEST(CuTe_core, Reverse_Array)
{
using cute::get;
{
const auto t = cute::array<int, 0>{};
[[maybe_unused]] auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 0);
using reverse_type = cute::array<int, 0>;
static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
}
{
const auto t = cute::array<int, 1>{123};
[[maybe_unused]] auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 1);
EXPECT_EQ(get<0>(t_r), 123);
using reverse_type = cute::array<int, 1>;
static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
}
{
const auto t = cute::array<int, 2>{123, 456};
[[maybe_unused]] auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 2);
EXPECT_EQ(get<0>(t_r), 456);
EXPECT_EQ(get<1>(t_r), 123);
using reverse_type = cute::array<int, 2>;
static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
}
{
const auto t = cute::array<float, 5>{1.125f, 2.25f, 3.5f, 4.625f, 5.75f};
auto t_r = cute::reverse(t);
static_assert(cute::tuple_size_v<decltype(t_r)> == 5);
EXPECT_EQ(get<0>(t_r), 5.75f);
EXPECT_EQ(get<1>(t_r), 4.625f);
EXPECT_EQ(get<2>(t_r), 3.5f);
EXPECT_EQ(get<3>(t_r), 2.25f);
EXPECT_EQ(get<4>(t_r), 1.125f);
using reverse_type = cute::array<float, 5>;
static_assert(cute::is_same_v<decltype(t_r), reverse_type>);
}
}
| test/unit/cute/core/reverse.cpp/0 | {
"file_path": "test/unit/cute/core/reverse.cpp",
"repo_id": "test",
"token_count": 2010
} | 59 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cassert>
#include <type_traits>
#include <cute/container/tuple.hpp>
#include <cute/int_tuple.hpp>
template<class T>
class ConvertibleTo {
public:
ConvertibleTo(T val) : val_(val) {}
operator T () const { return val_; }
private:
T val_ = 0;
};
template<class Integral, Integral Value>
using IC = std::integral_constant<Integral, Value>;
TEST(CuTe_core_msvc_compilation, TupleAssignment)
{
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("cute::tuple creation and assignment");
CUTLASS_TRACE_HOST("-------------------------------");
using forty_two_type = IC<int, 42>;
using forty_three_type = IC<size_t, 43>;
using ebo_s_type = cute::detail::EBO<0, forty_two_type>;
[[maybe_unused]] ebo_s_type ebo_s;
static_assert(std::is_same_v<decltype(cute::detail::getv(ebo_s)), forty_two_type>);
using ebo_d_type = cute::detail::EBO<1, size_t>;
[[maybe_unused]] ebo_d_type ebo_d(43u);
assert(ebo_d.t_ == 43u);
static_assert(std::is_same_v<std::remove_const_t<std::remove_reference_t<decltype(cute::detail::getv(ebo_d))>>, size_t > );
assert(cute::detail::getv(ebo_d) == 43u);
[[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0, 1, 2>, int, forty_two_type, size_t> tb0{
41, forty_two_type{}, size_t(43u) };
[[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0, 1, 2>, int, forty_two_type, size_t> tb1;
int val41 = ConvertibleTo{41};
assert(val41 == 41);
size_t val43 = ConvertibleTo{size_t(43u)};
assert(val43 == size_t{43u});
[[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0, 1, 2>, int, forty_two_type, size_t> tb2{
ConvertibleTo{41}, forty_two_type{}, ConvertibleTo{size_t(43u)}};
[[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0>, int> tb3{ 41 };
[[maybe_unused]] cute::detail::TupleBase<std::index_sequence<0>, int> tb3a{ 42 };
tb3 = tb3a;
using tuple_0d_type = cute::tuple<>;
using tuple_1d_d_type = cute::tuple<int>;
using tuple_1d_s_type = cute::tuple<forty_two_type>;
using tuple_2d_dd_type = cute::tuple<int, size_t>;
using tuple_2d_ss_type = cute::tuple<forty_two_type, forty_three_type>;
[[maybe_unused]] tuple_0d_type t0;
// Symptom: "illegal member initialization: 'TupleBase<int>' is not a base or member"
[[maybe_unused]] tuple_1d_d_type t1{ 42 };
[[maybe_unused]] tuple_1d_s_type t2;
[[maybe_unused]] tuple_1d_d_type t1a{ 43 };
t1 = t1a;
[[maybe_unused]] tuple_2d_dd_type t3{ 42, size_t(43u) };
[[maybe_unused]] tuple_2d_ss_type t4;
t3 = t4;
[[maybe_unused]] tuple_2d_dd_type t3a{ 44, size_t(45u) };
// Symptom: "illegal member initialization:
// 'TupleBase<int, unsigned __int64>' is not a base or member"
t3 = t3a;
}
TEST(CuTe_core_msvc_compilation, TupleGetSingleInteger)
{
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("cute::get<I> on cute::tuple for single integer I");
CUTLASS_TRACE_HOST("-------------------------------");
cute::tuple<int, ConvertibleTo<size_t>, IC<int, 43>> t0{ 41, size_t(42u), IC<int, 43>{} };
[[maybe_unused]] auto t0_0 = cute::get<0>(t0);
static_assert(std::is_same_v<decltype(t0_0), int>);
assert(t0_0 == 41);
[[maybe_unused]] auto t0_1 = cute::get<1>(t0);
static_assert(std::is_same_v<decltype(t0_1), ConvertibleTo<size_t>>);
[[maybe_unused]] auto t0_2 = cute::get<2>(t0);
static_assert(std::is_same_v<decltype(t0_2), IC<int, 43>>);
}
TEST(CuTe_core_msvc_compilation, TupleGetRecursive)
{
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("cute::get<I...> on cute::tuple");
CUTLASS_TRACE_HOST("-------------------------------");
using inner_tuple_type = cute::tuple<int, ConvertibleTo<size_t>, IC<int, 43>>;
using outer_tuple_type = cute::tuple<IC<int, 40>, inner_tuple_type, size_t>;
inner_tuple_type t0_inner{ 41, size_t(42u), IC<int, 43>{} };
outer_tuple_type t0_outer{ IC<int, 40>{}, t0_inner, size_t(44u) };
[[maybe_unused]] auto t0_outer_0 = cute::get<0>(t0_outer);
static_assert(std::is_same_v<decltype(t0_outer_0), IC<int, 40>>);
[[maybe_unused]] auto t0_outer_1 = cute::get<1>(t0_outer);
static_assert(std::is_same_v<decltype(t0_outer_1), inner_tuple_type>);
[[maybe_unused]] auto t0_outer_2 = cute::get<2>(t0_outer);
static_assert(std::is_same_v<decltype(t0_outer_2), size_t>);
assert(t0_outer_2 == size_t(44u));
// Leftmost index is innermost in the nested get sequence: get<1, 0>(t) == get<0>(get<1>(t)).
[[maybe_unused]] auto t0_outer_10 = cute::get<1, 0>(t0_outer);
static_assert(std::is_same_v<decltype(t0_outer_10), int>);
assert(t0_outer_10 == 41);
}
| test/unit/cute/msvc_compilation/tuple.cpp/0 | {
"file_path": "test/unit/cute/msvc_compilation/tuple.cpp",
"repo_id": "test",
"token_count": 2463
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <fstream>
#include <iostream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct MultistageTestbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute =
typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
//
// Methods
//
MultistageTestbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080)
: init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) {}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind, uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
int scope = (cutlass::sizeof_bits<Element>::value == 8) ? 2 : 8;
cutlass::reference::host::TensorFillRandomUniform(view, seed, scope,
-scope, 0);
} else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1);
} else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
} else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(),
view.capacity());
} else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Waives test if CUDA device is insufficient
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waives test if CUDA device is insufficient
if (!sufficient()) {
return true;
}
//
// Allocate the GEMM workspace
//
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA>
tensor_A(problem_size.mk());
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB>
tensor_B(problem_size.kn());
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC>
tensor_C(problem_size.mn());
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC>
tensor_D(problem_size.mn());
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC>
reference_D(problem_size.mn(), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
cutlass::reference::host::TensorCopy(reference_D.host_view(),
tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size, tensor_A.device_ref(), tensor_B.device_ref(),
tensor_C.device_ref(), tensor_D.device_ref(), {alpha, beta}};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return true;
}
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Verify
//
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size, alpha, tensor_A.host_ref(), tensor_B.host_ref(), beta,
reference_D.host_ref(), ElementAccumulator(0));
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_" << problem_size.m() << "x"
<< problem_size.n() << "x" << problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x" << Gemm::ThreadblockShape::kN
<< "x" << Gemm::ThreadblockShape::kK << "_" << Gemm::WarpShape::kM
<< "x" << Gemm::WarpShape::kN << "x" << Gemm::WarpShape::kK
<< ".txt";
std::ofstream file(fname.str());
file << "problem: " << problem_size << ", alpha: " << alpha
<< ", beta: " << beta << "\n\n";
file << "A =\n"
<< tensor_A.host_view() << "\nB =\n"
<< tensor_B.host_view() << "\nC =\n"
<< tensor_C.host_view() << "\n\nReference =\n"
<< reference_D.host_view() << "\nComputed =\n"
<< tensor_D.host_view();
}
return passed;
}
/// Runs a set of problem sizes
bool run_all() {
bool passed = true;
int problem_size_m[] = {16, 528};
int problem_size_n[] = {16, 528};
int problem_size_k[] = {Gemm::InstructionShape::kK,
Gemm::ThreadblockShape::kK * Gemm::kStages +
Gemm::InstructionShape::kK};
double problem_alpha[] = {1.0};
// TODO: Try non-zero beta values once the multistage epilogue is implemented
double problem_beta[] = {0.0};
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (double alpha : problem_alpha) {
for (double beta : problem_beta) {
passed =
run({m, n, k}, ElementCompute(alpha), ElementCompute(beta));
if (!passed) {
return false;
}
}
}
}
}
}
return true;
}
};
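// Illustrative usage (not part of the original header; the GEMM configuration is
// hypothetical). A unit test typically instantiates the testbed with a concrete
// device-level GEMM type and sweeps the built-in problem sizes:
//
//   using Gemm = cutlass::gemm::device::Gemm</* hypothetical configuration */>;
//   test::gemm::device::MultistageTestbed<Gemm> testbed;
//   EXPECT_TRUE(testbed.run_all());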
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/multistage_testbed.h/0 | {
"file_path": "test/unit/gemm/device/multistage_testbed.h",
"repo_id": "test",
"token_count": 3922
} | 61 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for Sm90 f16_f16_f16 with cooperative EVT epilogue
D = alpha * acc + beta * c + aux_load
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_evt.hpp"
#include "sm90_evt_operations.hpp"
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
namespace test::gemm::device {
template <class ElementCompute, class ElementAccumulator, bool IsCNeed>
static constexpr auto select_evt_d() {
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
using BinaryCompute0 = Sm90EVT<Sm90Compute<
cutlass::multiplies,
ElementCompute,
ElementCompute,
RoundStyle>, // alpha * acc
Sm90ScalarBroadcast<ElementAccumulator>, // alpha
Sm90AccFetch // acc
>;
if constexpr (IsCNeed) {
using EVT_D = Sm90EVT<Sm90Compute<cutlass::homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>,
Sm90ScalarBroadcast<ElementAccumulator>, // beta
Sm90SrcFetch<ElementCompute>, // C
BinaryCompute0>;
return EVT_D{};
} else {
return BinaryCompute0{};
}
}
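// select_evt_d() builds the epilogue visitor tree for D: alpha * acc when C is
// void, and beta * C + (alpha * acc) when a C operand is present. The TESTs
// below reuse this tree directly for the with-D kernels and wrap it in an
// Sm90AuxStore node for the void-D variants.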
template <class Gemm, class GemmWithoutD>
bool testEVTAuxStoreWithoutD() {
using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;
int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB);
std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment};
std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment};
if constexpr (std::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule,
cutlass::gemm::KernelTmaWarpSpecializedPingpong>) {
problem_size_m.push_back(768);
problem_size_n.push_back(768);
}
using GemmKernel = typename Gemm::GemmKernel;
constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages;
constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{});
std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment};
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementD = typename Gemm::ElementD;
constexpr bool has_c = not cute::is_void_v<ElementC>;
cutlass::DeviceAllocation<ElementA> A_block;
cutlass::DeviceAllocation<ElementB> B_block;
cutlass::DeviceAllocation<cute::conditional_t<has_c, ElementC, ElementD>> C_block;
cutlass::DeviceAllocation<ElementD> D_block;
cutlass::DeviceAllocation<ElementD> aux_store_D_block;
cutlass::DeviceAllocation<uint8_t> workspace;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
ProblemShapeType problem_size;
int l = 1;
problem_size = ProblemShapeType{m, n, k, l};
// Run Base Gemm to get reference D
A_block.reset(m * k);
B_block.reset(k * n);
C_block.reset(m * n);
D_block.reset(m * n);
aux_store_D_block.reset(m * n);
Gemm gemm_op_base;
auto stride_A = cutlass::make_cute_packed_stride(
typename GemmKernel::StrideA{}, cute::make_shape(m, k, cute::Int<1>{}));
auto stride_B = cutlass::make_cute_packed_stride(
typename GemmKernel::StrideB{}, cute::make_shape(n, k, cute::Int<1>{}));
auto stride_C = cutlass::make_cute_packed_stride(
typename GemmKernel::StrideC{}, cute::make_shape(m, n, cute::Int<1>{}));
auto stride_D = cutlass::make_cute_packed_stride(
typename GemmKernel::StrideD{}, cute::make_shape(m, n, cute::Int<1>{}));
auto arguments_base = typename Gemm::Arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
{
A_block.get(), stride_A,
B_block.get(), stride_B
},
{ // Epilogue arguments
{}, // thread
has_c ? C_block.get() : nullptr, stride_C,
D_block.get(), stride_D,
}, // Epilogue arguments end
/*hw_info=*/{},
/*scheduler_args=*/{}
};
// Check the aux-store path without D:
// set D to void and let Sm90AuxStore write the output instead,
// then verify it matches the D produced by the base kernel.
GemmWithoutD gemm_op;
auto arguments = typename GemmWithoutD::Arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size,
{
A_block.get(), stride_A,
B_block.get(), stride_B
},
{ // Epilogue arguments
{}, // thread
has_c ? C_block.get() : nullptr, stride_C,
nullptr, stride_D,
}, // Epilogue arguments end
/*hw_info=*/{},
/*scheduler_args=*/{}
};
constexpr float beta [[maybe_unused]] = 1.0;
constexpr float alpha [[maybe_unused]] = 1.0;
using ElementC = typename GemmWithoutD::ElementC;
if constexpr (not has_c) {
arguments_base.epilogue.thread = {
// binary op : alpha * acc
{{alpha}}, // leaf op+args : alpha
{}, // leaf op+args : acc
{} // binary args : multiplies
};
arguments.epilogue.thread = {
// unary op: aux store D
{
// binary op : alpha * acc
{{alpha}}, // leaf op+args : alpha
{}, // leaf op+args : acc
{} // binary args : multiplies
},
{aux_store_D_block.get(), stride_D}
};
} else {
arguments_base.epilogue.thread = {
// ternary op : beta * C + (alpha * acc)
{{beta}}, // leaf op+args : beta
{}, // op+args : C
{
// binary op : alpha * acc
{{alpha}}, // leaf op+args : alpha
{}, // leaf op+args : acc
{} // binary args : multiplies
}, // end binary op
{} // ternary args : multiply_add
};
arguments.epilogue.thread = {
// unary op: aux store D
{
// ternary op : beta * C + (alpha * acc)
{{beta}}, // leaf op+args : beta
{}, // op+args : C
{
// binary op : alpha * acc
{{alpha}}, // leaf op+args : alpha
{}, // leaf op+args : acc
{} // binary args : multiplies
}, // end binary op
{} // ternary args : multiply_add
},
{aux_store_D_block.get(), stride_D}
};
}
cutlass::Status status;
cudaError_t result;
status = gemm_op_base.can_implement(arguments_base);
EXPECT_EQ(status, cutlass::Status::kSuccess) << "Error gemm base not supported";
size_t workspace_size_base = Gemm::get_workspace_size(arguments_base);
workspace.reset(workspace_size_base);
status = gemm_op_base.initialize(arguments_base, workspace.get());
status = gemm_op_base.run();
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "Error at Base Kernel Sync.";
size_t workspace_size = GemmWithoutD::get_workspace_size(arguments);
workspace.reset(workspace_size);
status = gemm_op.can_implement(arguments);
EXPECT_EQ(status, cutlass::Status::kSuccess);
status = gemm_op.initialize(arguments, workspace.get());
status = gemm_op.run();
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync.";
bool passed = cutlass::reference::device::BlockCompareEqual(aux_store_D_block.get(), D_block.get(), m * n);
if (!passed) {
return false;
}
}
}
}
return true;
}
} // namespace test::gemm::device
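// Each TEST below builds the same kernel twice: once with a half_t D operand and
// once with D = void, where an Sm90AuxStore node in the fusion callbacks writes
// the result instead. testEVTAuxStoreWithoutD then checks that the two outputs
// are element-wise identical across a sweep of problem sizes.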
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_VoidC_VoidD_AuxStoreF16_RowMajor) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
>;
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
constexpr bool has_c = false;
using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle,
typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
typename AuxStoreDescriptor::CopyOpR2S>;
constexpr auto select_kernel = [](auto has_c, auto has_d) {
using FusionCallbacks =
cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue>;
return GemmKernel{};
};
using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_VoidC_VoidD_AuxStoreF16_ColumnMajor) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::ColumnMajor, cutlass::half_t
>;
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
constexpr bool has_c = false;
using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle,
typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
typename AuxStoreDescriptor::CopyOpR2S>;
constexpr auto select_kernel = [](auto has_c, auto has_d) {
using FusionCallbacks =
cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue>;
return GemmKernel{};
};
using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_VoidC_VoidD_AuxStoreF32_RowMajor) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
>;
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
constexpr bool has_c = false;
using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle,
typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
typename AuxStoreDescriptor::CopyOpR2S>;
constexpr auto select_kernel = [](auto has_c, auto has_d) {
using FusionCallbacks =
cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue>;
return GemmKernel{};
};
using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_WithC_VoidD_AuxStoreF16_RowMajor) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
>;
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
constexpr bool has_c = true;
using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle,
typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
typename AuxStoreDescriptor::CopyOpR2S>;
constexpr auto select_kernel = [](auto has_c, auto has_d) {
using FusionCallbacks =
cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue>;
return GemmKernel{};
};
using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_WithC_VoidD_AuxStoreF16_ColumnMajor) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using TileShape_MNK = Shape<_256,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::ColumnMajor, cutlass::half_t
>;
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
constexpr bool has_c = true;
using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle,
typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
typename AuxStoreDescriptor::CopyOpR2S>;
constexpr auto select_kernel = [](auto has_c, auto has_d) {
using FusionCallbacks =
cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue>;
return GemmKernel{};
};
using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
EXPECT_TRUE(passed);
}
TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_WithC_VoidD_AuxStoreF32_RowMajor) {
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using TileShape_MNK = Shape<_128,_128,_64>;
using ClusterShape_MNK = Shape<_2,_2,_1>;
using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule
>;
using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t
>;
using namespace cutlass::epilogue::fusion;
constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
constexpr bool has_c = true;
using EVT_D = decltype(test::gemm::device::select_evt_d<cutlass::half_t, float, has_c>());
using AuxStore = Sm90AuxStore<AuxStoreDescriptor::Stages, typename EpilogueDescriptor::EpilogueTile,
typename AuxStoreDescriptor::Element, RoundStyle,
typename AuxStoreDescriptor::Stride, typename AuxStoreDescriptor::SmemLayoutAtom,
typename AuxStoreDescriptor::CopyOpR2S>;
constexpr auto select_kernel = [](auto has_c, auto has_d) {
using FusionCallbacks =
cute::conditional_t<decltype(has_d){}, EVT_D, Sm90EVT<AuxStore, EVT_D>>;
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape_MNK, ClusterShape_MNK,
EpilogueTileType,
float, float,
cute::conditional_t<decltype(has_c){}, cutlass::half_t, void>, LayoutC, 8,
cute::conditional_t<decltype(has_d){}, cutlass::half_t, void>, LayoutC, 8,
EpilogueSchedule,
FusionCallbacks
>::CollectiveOp;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
cutlass::half_t, LayoutA, 8,
cutlass::half_t, LayoutB, 8,
float,
TileShape_MNK, ClusterShape_MNK,
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::KernelTmaWarpSpecializedCooperative
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue>;
return GemmKernel{};
};
using GemmKernel = decltype(select_kernel(cute::C<has_c>{}, cute::C<true>{}));
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
using GemmKernelWithoutD = decltype(select_kernel(cute::C<has_c>{}, cute::C<false>{}));
using GemmWithoutD = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelWithoutD>;
bool passed = test::gemm::device::testEVTAuxStoreWithoutD<Gemm, GemmWithoutD>();
EXPECT_TRUE(passed);
}
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_store.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_aux_store.cu",
"repo_id": "test",
"token_count": 12184
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
Testbed for sparse operations not to be released for CUDA 11.0 GA. Expected release is 11.1.
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct SparseTestbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
static int const kSparse = Gemm::GemmKernel::kSparse;
static int const kMetaSizeInBits = Gemm::GemmKernel::kMetaSizeInBits;
static int const kMaxID2 = Gemm::GemmKernel::kMaxID2;
static int const kElementsPerElementE = Gemm::GemmKernel::kElementsPerElementE;
using ElementE = typename Gemm::GemmKernel::ElementE;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE = typename Gemm::GemmKernel::LayoutE;
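// Operand A is stored compressed by a factor of kSparse along K. Operand E holds
// the per-element selection metadata (kMetaSizeInBits bits each, with
// kElementsPerElementE entries packed into one ElementE word) and must be
// reordered into ReorderedLayoutE before the device kernel can consume it.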
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_E;
uint64_t seed;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A;
cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A_uncompressed;
cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_D;
cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> reference_D;
cutlass::HostTensor<ElementE, LayoutE> tensor_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> tensor_E_reordered;
//
// Methods
//
SparseTestbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080)
: init_A(init_A_),
init_B(init_B_),
init_C(init_C_),
init_E(init_E_),
seed(seed_) {}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse));
tensor_A_uncompressed.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
tensor_E.resize(cutlass::make_Coord(
problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
tensor_E_reordered.resize(cutlass::make_Coord(
problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_E.host_view(), seed, kMetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (kMaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(tensor_E.host_view(),
(ElementE)(content));
} else {
EXPECT_TRUE(false);
}
cutlass::reorder_meta(tensor_E_reordered.host_ref(), tensor_E.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
tensor_E_reordered.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nE =\n" << tensor_E.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the result is a GEMM
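/// by reconstructing a dense A on the host from the compressed A and metadata E
/// (cutlass::uncompress) and then running the dense host reference GEMM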
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
cutlass::uncompress(tensor_A_uncompressed.host_ref(), tensor_A.host_ref(),
tensor_E.host_ref(), problem_size.m(), problem_size.k());
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A_uncompressed.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
tensor_E_reordered.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
// This failure is likely due to insufficient device capabilities. Waive the test.
if (status != cutlass::Status::kSuccess) {
return true;
}
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Error with split_k_slices = " << split_k_slices << ", alpha: " << alpha << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestAllSparseGemm() {
bool passed = true;
int const kMinimumOperandElementSize =
std::min(
int(cutlass::sizeof_bits<typename Gemm::ElementA>::value),
int(cutlass::sizeof_bits<typename Gemm::ElementB>::value));
// The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
// because of the reordering of operand E.
int const kAlignmentM = std::max(((sizeof(typename Gemm::ElementE) == 2) ? 32 : 16),
kMinimumOperandElementSize);
int const kAlignmentN = 128 / kMinimumOperandElementSize;
int problem_size_m[] = {kAlignmentM, 512 - 3 * kAlignmentM};
int problem_size_n[] = {kAlignmentN, 512 - 2 * kAlignmentN};
int problem_size_k[] = {Gemm::ThreadblockShape::kK,
Gemm::ThreadblockShape::kK * (Gemm::kStages + 1)};
int split_k_slices[] = {
1, 2, 3
};
double problem_alpha[] = {
1
};
double problem_beta[] = {
2.0
};
SparseTestbed<Gemm> testbed;
using ElementCompute = typename Gemm::EpilogueOutputOp::ElementCompute;
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (int split_k : split_k_slices) {
if (!Gemm::kSplitKSerial && split_k > 1) {
continue;
}
if (split_k > 1 && k / Gemm::ThreadblockShape::kK < split_k) {
continue;
}
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
cutlass::gemm::GemmCoord problem_size(m, n, k);
passed = testbed.run(
problem_size,
split_k,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
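// Illustrative usage (hypothetical instantiation, not part of this header):
// a sparse GEMM unit test simply forwards its device-level kernel type here.
//
//   TEST(SM80_Device_Sparse_Gemm_example, 128x128x64) {  // hypothetical test name
//     using Gemm = cutlass::gemm::device::SparseGemm</* hypothetical configuration */>;
//     EXPECT_TRUE(test::gemm::device::TestAllSparseGemm<Gemm>());
//   }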
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_sparse.h/0 | {
"file_path": "test/unit/gemm/device/testbed_sparse.h",
"repo_id": "test",
"token_count": 6258
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the OrderedSequenceBarrier class
*/
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
using namespace cute;
//////////////////// KERNEL /////////////////////////
template<typename OrderedSequencer>
struct SharedStorage
{
typename OrderedSequencer::SharedStorage storage;
};
// The goal of this kernel is to complete without deadlock; the ordering is observable via the debug printfs below
template<int Stages, int GroupCount, int ThreadsPerGroup>
__global__ static
void ordered_sequence_device(uint32_t const num_iterations)
{
extern __shared__ char shared_memory[];
using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>;
using SmemStorage = SharedStorage<SequenceBarrier>;
SmemStorage& shared_storage = *reinterpret_cast<SmemStorage*>(shared_memory);
int group_idx = threadIdx.x / ThreadsPerGroup;
typename SequenceBarrier::Params params;
params.group_id = group_idx; // sequence ID
params.group_size = ThreadsPerGroup; // Number of threads / participants in a group
SequenceBarrier barrier(shared_storage.storage, params);
// Ensure all CTAs in the cluster have completed initialization before issuing commits
__syncthreads();
cute::cluster_arrive_relaxed();
cute::cluster_wait();
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < num_iterations; ++i){
barrier.wait();
// STAGE 0 CODE...
#ifndef NDEBUG
int thread_idx_in_group = threadIdx.x % ThreadsPerGroup;
if (thread_idx_in_group == 0) {
printf("STAGE 0 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x);
}
#endif
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
barrier.arrive();
barrier.wait();
// STAGE 1 CODE...
#ifndef NDEBUG
if (thread_idx_in_group == 0) {
printf("STAGE 1 : Group_IDX : %d, id = %d, iter = %d, tidx = %d\n", group_idx, params.group_id, i, threadIdx.x);
}
#endif
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
barrier.arrive();
}
// To make sure remote SMEM doesn't get destroyed
cute::cluster_arrive();
cute::cluster_wait();
}
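// Protocol summary: each group of ThreadsPerGroup threads owns one slot in the
// ordered sequence. In every iteration the groups pass through two stages, and
// OrderedSequenceBarrier serializes them so that they enter each stage in
// group-index order; the kernel can only finish if that ordering makes forward
// progress without deadlock.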
/////////////////////////////////////////////////////
template<uint32_t Stages_, uint32_t GroupCount_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t ThreadsPerGroup = 128;
static constexpr uint32_t BlockSize = GroupCount_ * ThreadsPerGroup;
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t GroupCount = GroupCount_;
using SequenceBarrier = typename cutlass::OrderedSequenceBarrier<Stages, GroupCount>;
using SmemStorage = SharedStorage<SequenceBarrier>;
//
// Methods
//
// Launch the ordered-sequence kernel and wait for it to complete
cudaError_t run(uint32_t const kNumIters,
cudaStream_t stream = nullptr) {
// Launch configuration: a single 1x1x1 cluster
auto cluster_shape = Shape<_1, _1, _1>{};
//
// Configure and launch
//
int iterations = 1;
cudaError_t result;
for (int iter = 0; iter < iterations; ++iter) {
      int smem_size = int(sizeof(SmemStorage));
      result = cudaFuncSetAttribute(
        ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>,
        cudaFuncAttributeMaxDynamicSharedMemorySize,
        smem_size);
      if (result != cudaSuccess) {
        std::cerr << "Error: cudaFuncSetAttribute() failed" << std::endl;
        return result;
      }
      // Launch a single cluster, with 128 threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), size<2>(cluster_shape));
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(BlockSize,1,1);
const void* kernel = (const void*)ordered_sequence_device<Stages, GroupCount, ThreadsPerGroup>;
int iters = kNumIters;
void* kernel_params[] = {reinterpret_cast<void*>(&iters)};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
} // profiling loop ends
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
return result;
}
return cudaSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_2) {
Options options;
static constexpr uint32_t GroupCount = 2;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_3) {
Options options;
static constexpr uint32_t GroupCount = 3;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_4) {
Options options;
static constexpr uint32_t GroupCount = 4;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_OrderedSequence, Depth_2_Length_5) {
Options options;
static constexpr uint32_t GroupCount = 5;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, GroupCount>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
| test/unit/pipeline/sequence_barrier.cu/0 | {
"file_path": "test/unit/pipeline/sequence_barrier.cu",
"repo_id": "test",
"token_count": 2644
} | 64 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Layout type identifier
enum class LayoutTypeID {
kUnknown,
kColumnMajor,
kRowMajor,
kColumnMajorInterleavedK2,
kRowMajorInterleavedK2,
kColumnMajorInterleavedK4,
kRowMajorInterleavedK4,
kColumnMajorInterleavedK16,
kRowMajorInterleavedK16,
kColumnMajorInterleavedK32,
kRowMajorInterleavedK32,
kColumnMajorInterleavedK64,
kRowMajorInterleavedK64,
kTensorNCHW,
kTensorNCDHW,
kTensorNHWC,
kTensorNDHWC,
kTensorNC32HW32,
kTensorC32RSK32,
kTensorNC64HW64,
kTensorC64RSK64,
kInvalid
};
/// Numeric data type
enum class NumericTypeID {
kUnknown,
kVoid,
kB1,
kU2,
kU4,
kU8,
kU16,
kU32,
kU64,
kS2,
kS4,
kS8,
kS16,
kS32,
kS64,
kFE4M3,
kFE5M2,
kF16,
kBF16,
kTF32,
kF32,
kF64,
kCF16,
kCBF16,
kCF32,
kCTF32,
kCF64,
kCS2,
kCS4,
kCS8,
kCS16,
kCS32,
kCS64,
kCU2,
kCU4,
kCU8,
kCU16,
kCU32,
kCU64,
kInvalid
};
/// Enumerated type describing a transformation on a complex value.
enum class ComplexTransform {
kNone,
kConjugate,
kInvalid
};
/// Providers
enum class Provider {
kNone,
kCUTLASS,
kReferenceHost,
kReferenceDevice,
kCUBLAS,
kCUDNN,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumeration indicating the kind of operation
enum class OperationKind {
kGemm,
kRankK,
kRank2K,
kTrmm,
kSymm,
kConv2d,
kConv3d,
kEqGemm,
kSparseGemm,
kReduction,
kInvalid
};
/// Enumeration indicating whether scalars are in host or device memory
enum class ScalarPointerMode {
kHost,
kDevice,
kInvalid
};
/// Describes how reductions are performed across threadblocks
enum class SplitKMode {
kNone,
kSerial,
kParallel,
kParallelSerial,
kInvalid
};
/// Indicates the classification of the math instruction
enum class OpcodeClassID {
kSimt,
kTensorOp,
kWmmaTensorOp,
kSparseTensorOp,
kInvalid
};
enum class MathOperationID {
kAdd,
kMultiplyAdd,
kMultiplyAddSaturate,
kMultiplyAddMixedInputUpcast,
kMultiplyAddFastBF16,
kMultiplyAddFastF16,
kMultiplyAddFastF32,
kMultiplyAddComplex,
kMultiplyAddComplexFastF32,
kMultiplyAddGaussianComplex,
kXorPopc,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumeration indicating what kind of GEMM operation to perform
enum class GemmKind {
kGemm,
kSparse,
kUniversal,
kPlanarComplex,
kPlanarComplexArray,
kGrouped,
kInvalid
};
/// Enumeration indicating what kind of RankK update operation to perform
enum class RankKKind {
kUniversal,
kInvalid
};
/// Enumeration indicating what kind of TRMM operation to perform
enum class TrmmKind {
kUniversal,
kInvalid
};
/// Enumeration indicating what kind of SYMM/HEMM operation to perform
enum class SymmKind {
kUniversal,
kInvalid
};
/// Enumeration indicating what kind of Conv2d operation to perform
enum class ConvKind {
kUnknown,
kFprop,
kDgrad,
kWgrad,
kInvalid
};
enum class ConvModeID {
kCrossCorrelation,
kConvolution,
kInvalid
};
// Iterator algorithm enum in order of general performance-efficiency
enum class IteratorAlgorithmID {
kNone,
kAnalytic,
kOptimized,
kFixedChannels,
kFewChannels,
kInvalid
};
enum class EpilogueKind {
kUnknown,
kConversion,
kLinearCombination,
kLinearCombinationClamp,
kLinearCombinationPlanarComplex,
kLinearCombinationRelu,
kLinearCombinationSigmoid,
kInvalid
};
enum class RasterOrder {
kAlongN,
kAlongM,
kHeuristic,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/include/cutlass/library/types.h/0 | {
"file_path": "tools/library/include/cutlass/library/types.h",
"repo_id": "tools",
"token_count": 1897
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
  \brief Instantiates host reference operations for 2-D convolution and registers them with the manifest.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "conv_reference_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_conv2d_reference_operations(Manifest &manifest) {
make_conv_all<
2,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t,
cutlass::half_t
>(manifest);
make_conv_all<
2,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
float, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::complex<float>, cutlass::layout::TensorNHWC,
cutlass::complex<float>, cutlass::layout::TensorNHWC,
cutlass::complex<float>, cutlass::layout::TensorNHWC,
cutlass::complex<float>,
cutlass::complex<float>
>(manifest);
make_conv_fprop<
2,
int8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
int8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
2,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<uint8_t, float>
>(manifest);
make_conv_fprop<
2,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
2,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<cutlass::int4b_t, float>
>(manifest);
make_conv_fprop<
2,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<cutlass::uint4b_t, float>
>(manifest);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/conv2d.cu/0 | {
"file_path": "tools/library/src/reference/conv2d.cu",
"repo_id": "tools",
"token_count": 2557
} | 66 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
  \brief Enumerated types used by the CUTLASS profiler, with string conversion helpers.
*/
#pragma once
#include <string>
#include <vector>
#include <map>
#include <iostream>
#include "cutlass/library/library.h"
#define TRACE(x) do { std::cout << __FILE__ << ":" << __LINE__ << " " << x << std::endl; } while (0)
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
T from_string(std::string const &);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing how the performance testbench evaluates kernels.
enum class ExecutionMode {
kProfile, ///< regular verification and profiling
kDryRun, ///< no kernels are launched or workspaces allocated; used to assess what operators might be launched
kEnumerate, ///< no kernels launched or workspaces allocated; lists all operation kind and operations
kTrace, ///< executes a single device-side computation with no other kernel launches
kInvalid
};
/// Converts an ExecutionMode enumerant to a string
char const *to_string(ExecutionMode mode, bool pretty = false);
/// Parses an ExecutionMode enumerant from a string
template <>
ExecutionMode from_string<ExecutionMode>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Library algorithm mode
enum class AlgorithmMode {
kMatching, ///< compare against best matching algorithm
kBest, ///< evaluate all library algorithms and report best
kDefault, ///< use the library's default algorithm option
kInvalid
};
/// Converts an AlgorithmMode enumerant to a string
char const *to_string(AlgorithmMode mode, bool pretty = false);
/// Parses an AlgorithmMode enumerant from a string
template <>
AlgorithmMode from_string<AlgorithmMode>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Outcome of a performance test
enum class Disposition {
kPassed,
kFailed,
kNotRun,
kIncorrect,
kNotVerified,
kInvalidProblem,
kNotSupported,
kInvalid
};
/// Converts a Disposition enumerant to a string
char const *to_string(Disposition disposition, bool pretty = false);
/// Parses a Disposition enumerant from a string
template <>
Disposition from_string<Disposition>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Indicates when to save
enum class SaveWorkspace {
kNever,
kIncorrect,
kAlways,
kInvalid
};
/// Converts a SaveWorkspace enumerant to a string
char const *to_string(SaveWorkspace save_option, bool pretty = false);
/// Parses a SaveWorkspace enumerant from a string
template <>
SaveWorkspace from_string<SaveWorkspace>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Indicates the type of kernel argument
// An argument is either a scalar value or itself a numeric type identifier:
// 1) Scalar (kScalar): the argument holds a value of some numeric type, e.g. u32.
//    Its C++ equivalent as "type name = initializer" is "u32 m = 32".
// 2) Numeric type: the argument holds a NumericTypeID.
//    Its C++ equivalent as "type name = initializer" is "NumericTypeID numeric_type = u32".
enum class ArgumentTypeID {
kScalar,
kInteger,
kTensor,
kBatchedTensor,
kStructure,
kEnumerated,
kInvalid
};
/// Converts an ArgumentTypeID enumerant to a string
char const *to_string(ArgumentTypeID type, bool pretty = false);
/// Parses an ArgumentTypeID enumerant from a string
template <>
ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
// Profiler typedefs
using ProviderVector = std::vector<library::Provider>;
using DispositionMap = std::map<library::Provider, Disposition>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Print vector for the report
template <typename T>
std::ostream& operator<< (std::ostream& out, const std::vector<T>& v) {
for (size_t i = 0; i < v.size(); ++i) {
out << to_string(v[i], true) << (i + 1u != v.size() ? "," : "");
}
return out;
}
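// Illustrative usage (a sketch only; the exact string tokens accepted by from_string<> are
// defined in the corresponding .cu translation unit and are assumed here):
//
//   ExecutionMode mode = from_string<ExecutionMode>("profile");
//   std::cout << to_string(mode, /*pretty=*/true) << "\n";
//
//   ProviderVector providers = {library::Provider::kCUTLASS, library::Provider::kCUBLAS};
//   std::cout << providers << "\n";   // comma-separated, via the operator<< defined above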
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/enumerated_types.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/enumerated_types.h",
"repo_id": "tools",
"token_count": 1677
} | 67 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuBLAS.
*/
#include <stdexcept>
#if CUTLASS_ENABLE_CUBLAS
#include "cutlass/profiler/cublas_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuBLAS status to cutlass::Status
Status get_cutlass_status(cublasStatus_t cublas) {
switch (cublas) {
case CUBLAS_STATUS_SUCCESS:
return Status::kSuccess;
case CUBLAS_STATUS_INVALID_VALUE:
return Status::kErrorInvalidProblem;
case CUBLAS_STATUS_NOT_SUPPORTED:
return Status::kErrorNotSupported;
default: break;
}
return Status::kErrorInternal;
}
/// Converts a cuBLAS status to cutlass::profiler::Disposition
Disposition get_cutlass_disposition(cublasStatus_t cublas_status) {
if (cublas_status == CUBLAS_STATUS_INVALID_VALUE) {
return Disposition::kInvalidProblem;
}
else if (cublas_status == CUBLAS_STATUS_NOT_SUPPORTED) {
return Disposition::kNotSupported;
}
return Disposition::kFailed;
}
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
bool get_cublas_transpose_operation(
cublasOperation_t &operation,
library::LayoutTypeID layout,
library::ComplexTransform transform) {
switch (layout) {
case library::LayoutTypeID::kColumnMajor:
if (transform == library::ComplexTransform::kNone) {
operation = CUBLAS_OP_N;
return true;
}
else {
return false;
}
break;
case library::LayoutTypeID::kRowMajor:
if (transform == library::ComplexTransform::kNone) {
operation = CUBLAS_OP_T;
return true;
}
else if (transform == library::ComplexTransform::kConjugate) {
operation = CUBLAS_OP_C;
return true;
}
break;
default: break;
}
return false;
}
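// Summary of the mapping implemented above (every other combination returns false):
//   kColumnMajor + kNone      -> CUBLAS_OP_N
//   kRowMajor    + kNone      -> CUBLAS_OP_T
//   kRowMajor    + kConjugate -> CUBLAS_OP_C
// A conjugated column-major operand has no single cuBLAS transpose operation and is rejected.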
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type) {
switch (element_type) {
case library::NumericTypeID::kFE4M3:
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
data_type = CUDA_R_8F_E4M3;
return true;
#endif
break;
case library::NumericTypeID::kFE5M2:
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
data_type = CUDA_R_8F_E5M2;
return true;
#endif
break;
case library::NumericTypeID::kF16:
data_type = CUDA_R_16F;
return true;
case library::NumericTypeID::kBF16:
data_type = CUDA_R_16BF;
return true;
case library::NumericTypeID::kTF32:
break;
case library::NumericTypeID::kF32:
data_type = CUDA_R_32F;
return true;
case library::NumericTypeID::kF64:
data_type = CUDA_R_64F;
return true;
case library::NumericTypeID::kS4:
break;
case library::NumericTypeID::kS8:
data_type = CUDA_R_8I;
return true;
case library::NumericTypeID::kS16:
break;
case library::NumericTypeID::kS32:
data_type = CUDA_R_32I;
return true;
case library::NumericTypeID::kS64:
break;
case library::NumericTypeID::kU4:
break;
case library::NumericTypeID::kU8:
data_type = CUDA_R_8U;
return true;
case library::NumericTypeID::kU16:
break;
case library::NumericTypeID::kU32:
data_type = CUDA_R_32U;
return true;
case library::NumericTypeID::kU64:
break;
case library::NumericTypeID::kB1:
break;
case library::NumericTypeID::kCF32:
data_type = CUDA_C_32F;
return true;
case library::NumericTypeID::kCF64:
data_type = CUDA_C_64F;
return true;
case library::NumericTypeID::kInvalid:
default:
break;
}
return false;
}
/// Maps a cutlass::SideMode to cuBLAS side mode
bool get_cublas_side_mode(cublasSideMode_t& side, SideMode side_mode) {
switch (side_mode) {
case SideMode::kLeft:
side = CUBLAS_SIDE_LEFT;
return true;
case SideMode::kRight:
side = CUBLAS_SIDE_RIGHT;
return true;
default: break;
}
return false;
}
/// Maps a cutlass::FillMode to cuBLAS fill mode
bool get_cublas_fill_mode(cublasFillMode_t& uplo, FillMode fill_mode) {
switch (fill_mode) {
case FillMode::kLower:
uplo = CUBLAS_FILL_MODE_LOWER;
return true;
case FillMode::kUpper:
uplo = CUBLAS_FILL_MODE_UPPER;
return true;
default: break;
}
return false;
}
/// Maps a cutlass::DiagType to cuBLAS diag type
bool get_cublas_diag_type(cublasDiagType_t& diag, DiagType diag_type) {
switch (diag_type) {
case DiagType::kNonUnit:
diag = CUBLAS_DIAG_NON_UNIT;
return true;
case DiagType::kUnit:
diag = CUBLAS_DIAG_UNIT;
return true;
default: break;
}
return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
cublasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) {
return (opcode_class == library::OpcodeClassID::kSimt ?
CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular GEMM description
Status cublas_satisfies(library::GemmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
  // output types S4 and S8 are not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
  // input types BF16 and TF32 are not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
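// For example, a tensor-op kernel accumulating in S32, or one with an S4/S8 C operand, or one
// with a BF16/TF32 A operand is reported as kErrorNotSupported, presumably so callers skip the
// cuBLAS reference for those kernels.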
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasGemmExDispatcher::cublasGemmExDispatcher(
library::GemmDescription const &op_desc,
library::GemmUniversalConfiguration configuration_,
library::GemmUniversalArguments arguments_,
cublasGemmAlgo_t algorithm
):
configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) {
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
op_desc.B.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes GEMM using these arguments
cublasStatus_t cublasGemmExDispatcher::operator()(cublasHandle_t handle) {
if (configuration.mode == library::GemmUniversalMode::kBatched) {
return cublasGemmStridedBatchedEx(
handle,
trans_A,
trans_B,
configuration.problem_size.m(),
configuration.problem_size.n(),
configuration.problem_size.k(),
arguments.alpha,
arguments.A,
data_type_A,
int(configuration.lda),
arguments.batch_stride_A,
arguments.B,
data_type_B,
int(configuration.ldb),
arguments.batch_stride_B,
arguments.beta,
arguments.D,
data_type_C,
int(configuration.ldc),
arguments.batch_stride_C,
configuration.batch_count,
#if (__CUDACC_VER_MAJOR__ >= 11)
compute_type,
#else
compute_data_type,
#endif
algo
);
}
else {
return cublasGemmEx(
handle,
trans_A,
trans_B,
configuration.problem_size.m(),
configuration.problem_size.n(),
configuration.problem_size.k(),
arguments.alpha,
arguments.A,
data_type_A,
int(configuration.lda),
arguments.B,
data_type_B,
int(configuration.ldb),
arguments.beta,
arguments.D,
data_type_C,
int(configuration.ldc),
#if (__CUDACC_VER_MAJOR__ >= 11)
compute_type,
#else
compute_data_type,
#endif
algo
);
}
}
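// Illustrative call sequence (a sketch, not code from this file; 'desc', 'config' and 'args'
// are assumed to be provided by the surrounding profiler, and error handling is elided):
//
//   cublasHandle_t handle;
//   cublasCreate(&handle);
//   detail::cublasGemmExDispatcher gemm_op(desc, config, args,
//     get_cublas_gemm_algo(cta_m, cta_n, cta_k, opcode_class));
//   if (gemm_op.status == Status::kSuccess) {
//     cublasStatus_t result = gemm_op(handle);  // dispatches to cublasGemmEx or the strided-batched variant
//   }
//   cublasDestroy(handle);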
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular RankK description
Status cublas_satisfies(library::RankKDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
  // output types S4 and S8 are not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
  // input types BF16 and TF32 are not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasRankKDispatcher::cublasRankKDispatcher(
library::RankKDescription const &op_desc,
library::RankKConfiguration configuration_,
library::RankKArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
blas_mode = op_desc.blas_mode;
num_ranks = op_desc.num_ranks;
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes RankK using these arguments
cublasStatus_t cublasRankKDispatcher::operator()(cublasHandle_t handle) {
// SYRK and HERK
if (num_ranks == 1) {
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasCherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
// SYR2K and HER2K
else if (num_ranks == 2) {
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasCher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular TRMM description
Status cublas_satisfies(library::TrmmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
  // output types S4 and S8 are not supported in cuBLAS
if (desc.D.element == library::NumericTypeID::kS4 ||
desc.D.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
  // input types BF16 and TF32 are not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasTrmmDispatcher::cublasTrmmDispatcher(
library::TrmmDescription const &op_desc,
library::TrmmConfiguration configuration_,
library::TrmmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_diag_type(diag, op_desc.diag_type));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_D, op_desc.D.element));
  // If A is transposed, the fill mode passed to cuBLAS must be inverted.
if (trans_A == CUBLAS_OP_T || trans_A == CUBLAS_OP_C) {
if (uplo == CUBLAS_FILL_MODE_LOWER)
uplo = CUBLAS_FILL_MODE_UPPER;
else
uplo = CUBLAS_FILL_MODE_LOWER;
}
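  // For example, a row-major, lower-triangular A on the CUTLASS side is presented to cuBLAS
  // (which assumes column-major storage) as CUBLAS_OP_T with CUBLAS_FILL_MODE_UPPER.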
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes TRMM using these arguments
cublasStatus_t cublasTrmmDispatcher::operator()(cublasHandle_t handle) {
if (data_type_A == data_type_D && data_type_A == CUDA_R_64F) {
return cublasDtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<double*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasStrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<float*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_C_64F) {
return cublasZtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasCtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldd)
);
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular Symm description
Status cublas_satisfies(library::SymmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
  // output types S4 and S8 are not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
  // input types BF16 and TF32 are not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
  // input types BF16 and TF32 are not supported in cuBLAS
if (desc.B.element == library::NumericTypeID::kBF16 ||
desc.B.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
  // only column-major A without a complex transform is supported in cuBLAS
if (desc.A.layout != library::LayoutTypeID::kColumnMajor ||
desc.transform_A != library::ComplexTransform::kNone) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasSymmDispatcher::cublasSymmDispatcher(
library::SymmDescription const &op_desc,
library::SymmConfiguration configuration_,
library::SymmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
blas_mode = op_desc.blas_mode;
bool good = true;
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes Symm using these arguments
cublasStatus_t cublasSymmDispatcher::operator()(cublasHandle_t handle) {
// SYMM and HEMM
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZhemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasChemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif // #if CUTLASS_ENABLE_CUBLAS
| tools/profiler/src/cublas_helpers.cu/0 | {
"file_path": "tools/profiler/src/cublas_helpers.cu",
"repo_id": "tools",
"token_count": 15733
} | 68 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
  \brief Profiler for structured sparse GEMM operations.
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/sparse_gemm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kSparseGemm,
{
{ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. sparse, ...)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"},
}
) {
description_ = " Structured sparse GEMM. D = alpha * A*B + beta * C";
}
/// Destructor
SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {
}
/// Prints usage statement for the math function
void SparseGemmOperationProfiler::print_usage(std::ostream &out) const {
out << "Sparse GEMM" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void SparseGemmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=SparseGemm --accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=SparseGemm --A=f16:column --B=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=SparseGemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=SparseGemm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=SparseGemm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=SparseGemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Status SparseGemmOperationProfiler::SparseGemmProblem::parse(
library::SparseGemmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
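  // Leading dimensions of the packed operands. For the structured-sparse format, operand A is
  // stored in compressed form with k/sparse columns, and the metadata tensor E holds one element
  // for every 'elements_per_128b' (= 128 / sizeof_bits of A's element) compressed A elements.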
this->elements_per_128b =
128 / library::sizeof_bits(operation_desc.A.element);
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout,
{int(this->m), int(this->k) / int(this->sparse)})
.front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->k), int(this->n)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)}).front();
this->lde =
DeviceAllocation::get_packed_layout(
operation_desc.E.layout,
{int(this->m), int(this->k / this->sparse / this->elements_per_128b)})
.front();
return Status::kSuccess;
}
/// Initializes a performance result
void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result(
PerformanceResult &result,
library::SparseGemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "E", problem_space,
std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/// Extracts the problem dimensions
Status SparseGemmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SparseGemmDescription const &operation_desc =
static_cast<library::SparseGemmDescription const &>(operation->description());
if (operation_desc.gemm_kind != library::GemmKind::kSparse) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
gemm_workspace_.configuration.lda = problem_.lda;
gemm_workspace_.configuration.ldb = problem_.ldb;
gemm_workspace_.configuration.ldc = problem_.ldc;
gemm_workspace_.configuration.ldd = problem_.ldc;
gemm_workspace_.configuration.lde = problem_.lde;
gemm_workspace_.arguments.A = nullptr;
gemm_workspace_.arguments.B = nullptr;
gemm_workspace_.arguments.C = nullptr;
gemm_workspace_.arguments.D = nullptr;
gemm_workspace_.arguments.E = nullptr;
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
}
/// Initializes the performance result
void SparseGemmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::SparseGemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Input bytes read and Output bytes written for the gemm problem
result.bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) *
problem_.k / problem_.sparse +
int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) *
problem_.k +
int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) *
problem_.n +
int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) *
problem_.k / problem_.sparse / problem_.elements_per_128b;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n;
}
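  // FLOP count: 2*m*n*k multiply-accumulates for the GEMM itself plus 2*m*n for the
  // linear-combination epilogue D = alpha * A*B + beta * C.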
result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n);
result.runtime = 0;
}
/// Initializes workspace
Status SparseGemmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SparseGemmDescription const &operation_desc =
static_cast<library::SparseGemmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
gemm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.k) / int(problem_.sparse)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
gemm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.k), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
gemm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
gemm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
gemm_workspace_.E = device_context.allocate_sparsemeta_tensor(
options,
"E",
operation_desc.E.element,
operation_desc.E.layout,
operation_desc.A.element,
{int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)},
{int(problem_.lde)},
1, // batch_count
seed_shift++
);
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kSparseGemm;
results_.back().disposition = Disposition::kNotRun;
for(auto &verification_provider : options.verification.providers) {
results_.back().verification_map[verification_provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool SparseGemmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.E = gemm_workspace_.E->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool SparseGemmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.E = gemm_workspace_.E->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/sparse_gemm_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/sparse_gemm_operation_profiler.cu",
"repo_id": "tools",
"token_count": 7352
} | 69 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/device_utils.h"
#include <float.h>
#include <cstdlib>      // abort
#include <iostream>     // std::cerr
#include <type_traits>  // std::is_same
namespace cutlass {
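// Row-wise RMSNorm kernel vectorized for half-precision data: each thread block processes one row
// of an m x n matrix, loading eight half values at a time through float4. The first pass accumulates
// the sum of squares; the second pass scales each element by rsqrtf(mean(x^2) + epsilon) and
// multiplies by the corresponding weight.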
__global__ void rmsnorm_twoPassAlgo_e8(float4 *output, const float4 *input,
const float4 *weight,
const int m, const int n, float epsilon) {
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean;
float local_sums[1] = {0.0f};
const int n_8 = n / 8;
int offset = m_idx * n_8;
input += offset;
output += offset;
for (int index = tid; index < n_8; index += bdimx) {
const float4 local_val = input[index];
const half2 *h1 = (half2 *)&local_val.x;
const half2 *h2 = (half2 *)&local_val.y;
const half2 *h3 = (half2 *)&local_val.z;
const half2 *h4 = (half2 *)&local_val.w;
local_sums[0] += static_cast<float>(h1->x) * static_cast<float>(h1->x) +
static_cast<float>(h1->y) * static_cast<float>(h1->y) +
static_cast<float>(h2->x) * static_cast<float>(h2->x) +
static_cast<float>(h2->y) * static_cast<float>(h2->y) +
static_cast<float>(h3->x) * static_cast<float>(h3->x) +
static_cast<float>(h3->y) * static_cast<float>(h3->y) +
static_cast<float>(h4->x) * static_cast<float>(h4->x) +
static_cast<float>(h4->y) * static_cast<float>(h4->y);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
} else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = rsqrtf(local_sums[0] / n + epsilon);
}
__syncthreads();
for (int index = tid; index < n_8; index += bdimx) {
const float4 local_val = input[index];
const float4 weight_val = weight[index];
const half2 *l1 = (half2 *)&local_val.x;
const half2 *l2 = (half2 *)&local_val.y;
const half2 *l3 = (half2 *)&local_val.z;
const half2 *l4 = (half2 *)&local_val.w;
const half2 *g1 = (half2 *)&weight_val.x;
const half2 *g2 = (half2 *)&weight_val.y;
const half2 *g3 = (half2 *)&weight_val.z;
const half2 *g4 = (half2 *)&weight_val.w;
float4 tmp;
half2 *h1 = (half2 *)&tmp.x;
half2 *h2 = (half2 *)&tmp.y;
half2 *h3 = (half2 *)&tmp.z;
half2 *h4 = (half2 *)&tmp.w;
h1->x = half(static_cast<float>(l1->x) * s_mean * static_cast<float>(g1->x));
h1->y = half(static_cast<float>(l1->y) * s_mean * static_cast<float>(g1->y));
h2->x = half(static_cast<float>(l2->x) * s_mean * static_cast<float>(g2->x));
h2->y = half(static_cast<float>(l2->y) * s_mean * static_cast<float>(g2->y));
h3->x = half(static_cast<float>(l3->x) * s_mean * static_cast<float>(g3->x));
h3->y = half(static_cast<float>(l3->y) * s_mean * static_cast<float>(g3->y));
h4->x = half(static_cast<float>(l4->x) * s_mean * static_cast<float>(g4->x));
h4->y = half(static_cast<float>(l4->y) * s_mean * static_cast<float>(g4->y));
output[index] = tmp;
}
}
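// Generic element-at-a-time variant of the same two-pass RMSNorm, used when the vectorized
// half-precision path does not apply (arbitrary element type T, or n not a multiple of 8).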
template<typename T>
__global__ void rmsnorm_twoPassAlgo_e1(T* output,
const T* input,
const T* weight,
const int m, const int n,
float epsilon)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean;
float local_sums[1] = {0.0f};
int offset = m_idx * n;
input += offset;
output += offset;
for (int index = tid ; index < n ; index += bdimx){
float local_val = static_cast<float>(input[index]);
local_sums[0] += local_val * local_val;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = rsqrtf(local_sums[0] / n + epsilon);
}
__syncthreads();
for (int index = tid ; index < n ; index += bdimx){
const T weight_val = weight[index];
const T local_val = input[index];
output[index] = T(static_cast<float>(local_val) * s_mean * static_cast<float>(weight_val));
}
}
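// Host-side launcher: one thread block per row. Dispatches to the float4-vectorized kernel when T is
// cutlass::half_t and n is divisible by 8, and to the generic kernel otherwise.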
template <typename T>
void rmsnorm(cutlass::MatrixCoord tensor_size,
TensorRef<T, layout::RowMajor> ref_output,
TensorRef<T, layout::RowMajor> ref_input,
TensorRef<T, layout::RowMajor> ref_weight,
cudaStream_t stream, float epsilon = 1e-5f){
const int m = tensor_size.row();
const int n = tensor_size.column();
T* output = ref_output.data();
const T* input = ref_input.data();
const T* weight = ref_weight.data();
dim3 grid(m);
if (n % 8 == 0 && std::is_same<T, cutlass::half_t>::value) {
dim3 block(min(1024, (n / 8 + 31) / 32 * 32));
rmsnorm_twoPassAlgo_e8<<<grid, block, 0, stream>>>(
(float4 *)output, (const float4 *)input, (const float4 *)weight, m, n, epsilon);
} else {
dim3 block(min(1024, ((n + 31)/32 + 31)/32*32));
rmsnorm_twoPassAlgo_e1<<<grid, block, 0, stream>>>(
output, input, weight, m, n, epsilon);
}
auto result = cudaGetLastError();
if (result != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(result) << std::endl;
abort();
}
}
} // namespace cutlass
| tools/util/include/cutlass/util/device_rmsnorm.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_rmsnorm.h",
"repo_id": "tools",
"token_count": 3049
} | 70 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/reference/device/kernel/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
// Blocking structure potentially improves performance of reference implementation
// with a minor increase in complexity.
//
// Note, this reference implementation is NOT expected to approach peak performance.
using OutputTile = MatrixShape<4, 4>;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn)
);
// Launch a GEMM kernel
kernel::Gemm<
TensorRef<ElementA, LayoutA>,
TensorRef<ElementB, LayoutB>,
TensorRef<ElementC, LayoutC>,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
tensor_b,
beta,
tensor_c,
tensor_d,
initial_accum
);
}
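//
// Example usage (illustrative sketch only; assumes ref_A, ref_B, ref_C, and ref_D are TensorRef
// objects over device memory with the layouts named below, and m, n, k are ints):
//
//   cutlass::reference::device::compute_gemm<
//     float, cutlass::layout::ColumnMajor,   // A
//     float, cutlass::layout::ColumnMajor,   // B
//     float, cutlass::layout::ColumnMajor,   // C and D
//     float, float                           // scalar and accumulator types
//   >({m, n, k}, 1.0f, ref_A, ref_B, 0.0f, ref_C, ref_D, 0.0f /* initial_accum */);
//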
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum) {
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
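//
// Example usage of the functor form (illustrative sketch only; ref_A, ref_B, and ref_C are assumed
// to be TensorRef objects over device memory matching the element types and layouts named below):
//
//   cutlass::reference::device::Gemm<
//     cutlass::half_t, cutlass::layout::ColumnMajor,
//     cutlass::half_t, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::ColumnMajor,
//     float, float> reference_gemm;
//
//   reference_gemm({m, n, k}, alpha, ref_A, ref_B, beta, ref_C);
//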
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
AccumulatorType, arch::OpMultiplyAddSaturate> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
AccumulatorType, arch::OpXorPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp,
typename ConvertOp
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c,
AccumulatorType initial_accum) {
static_assert(
TensorRefCollectionA::kRank == 2 &&
TensorRefCollectionB::kRank == 2 &&
TensorRefCollectionC::kRank == 2, "Tensors must be of rank 2");
// Blocking structure potentially improves performance of reference implementation
// with a minor increase in complexity.
//
// Note, this reference implementation is NOT expected to approach peak performance.
using OutputTile = MatrixShape<4, 4>;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn),
batch_count
);
// Launch a GEMM kernel
kernel::BatchedGemm<
TensorRefCollectionA,
TensorRefCollectionB,
TensorRefCollectionC,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
tensor_b,
beta,
tensor_c,
initial_accum
);
}
/// Computes a batch of general matrix products over collections of matrices (tensors of rank=2).
/// This assumes the accumulator type is the same type as the scalars.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c) {
BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, AccumulatorType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/gemm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/gemm.h",
"repo_id": "tools",
"token_count": 5124
} | 71 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdexcept>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines several helpers
namespace detail {
/// Helper to perform for-each operation
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {
/// Index of the active rank
static int const kActiveRank = Rank - RankRemaining - 1;
/// Constructor for general rank
TensorForEachHelper(
Func &func,
Coord<Rank> const &extent,
Coord<Rank> &coord) {
for (int i = 0; i < extent.at(kActiveRank); ++i) {
coord[kActiveRank] = i;
TensorForEachHelper<Func, Rank, RankRemaining - 1>(func, extent, coord);
}
}
};
/// Helper to perform for-each operation
template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {
/// Index of the active rank
static int const kActiveRank = Rank - 1;
/// Constructor for fastest changing rank
TensorForEachHelper(
Func &func,
Coord<Rank> const &extent,
Coord<Rank> &coord) {
for (int i = 0; i < extent.at(kActiveRank); ++i) {
coord[kActiveRank] = i;
func(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over the index space of a tensor
template <
typename Func, ///< function applied to each point in a tensor's index space
int Rank> ///< rank of index space
void TensorForEach(Coord<Rank> extent, Func & func) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, coord);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over the index space of a tensor and calls a C++ lambda
template <
typename Func, ///< function applied to each point in a tensor's index space
int Rank> ///< rank of index space
void TensorForEachLambda(Coord<Rank> extent, Func func) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, coord);
}
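//
// Example (illustrative sketch only): visit every coordinate of a 2-D extent with a lambda.
//
//   cutlass::Coord<2> extent = cutlass::make_Coord(4, 8);
//   cutlass::reference::host::TensorForEachLambda(extent, [&](cutlass::Coord<2> coord) {
//     // e.g. read or write view.at(coord) here
//   });
//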
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
struct BlockForEach {
/// Constructor performs the operation.
BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params = typename Func::Params()) {
Func func(params);
for (size_t index = 0; index < capacity; ++index) {
ptr[index] = func();
}
}
};
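//
// Example (illustrative sketch only, using a hypothetical functor): Func must be constructible from
// a nested Func::Params type and provide operator() returning Element.
//
//   struct IotaFunc {
//     struct Params { float start; };
//     float value;
//     IotaFunc(Params p): value(p.start) { }
//     float operator()() { return value++; }
//   };
//
//   std::vector<float> buffer(1024);
//   cutlass::reference::host::BlockForEach<float, IotaFunc>(
//     buffer.data(), buffer.size(), IotaFunc::Params{1.0f});
//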
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/tensor_foreach.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_foreach.h",
"repo_id": "tools",
"token_count": 1327
} | 72 |