hip (string, lengths 140-3.32k)    cuda (string, lengths 84-3.33k)
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\JitLoops.cuh> #include <ATen/native/hip\Loops.cuh> #include <ATen/native/hip\Math.cuh> #include <ATen/native/hip\jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_i1_name[] = "modified_bessel_i1_forward"; void modified_bessel_i1_kernel_hip(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_hip", [&]() { jitted_gpu_kernel<modified_bessel_i1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i1_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_hip", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return modified_bessel_i1_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_modified_bessel_i1_stub, &modified_bessel_i1_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_i1_name[] = "modified_bessel_i1_forward"; void modified_bessel_i1_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_cuda", [&]() { jitted_gpu_kernel<modified_bessel_i1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i1_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return modified_bessel_i1_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_modified_bessel_i1_stub, &modified_bessel_i1_kernel_cuda); } // namespace at::native ###
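The pair above shows the mechanical rewrites hipify applies to a CUDA translation unit: cuda/ include directories become hip paths (with the Windows backslash left in), CONSTEXPR_EXCEPT_WIN_CUDA becomes CONSTEXPR_EXCEPT_WIN_HIP, and _cuda name suffixes become _hip. In later rows the same tool also rewrites the CUDA triple-chevron kernel launch into the hipLaunchKernelGGL macro. A minimal sketch of that launch rewrite follows; the kernel and launch configuration are illustrative and not taken from these files.

#if defined(USE_ROCM)
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

// Illustrative kernel; the launch site below is the part hipify rewrites.
__global__ void fill_kernel(float* out, float value, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

void launch_fill(float* out, float value, int n) {
  const dim3 block(256);
  const dim3 grid((n + 255) / 256);
#if defined(USE_ROCM)
  // HIP form produced by hipify from the triple-chevron launch below.
  hipLaunchKernelGGL(fill_kernel, grid, block, 0, 0, out, value, n);
#else
  fill_kernel<<<grid, block, 0, 0>>>(out, value, n);
#endif
}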
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\JitLoops.cuh> #include <ATen/native/hip\Loops.cuh> #include <ATen/native/hip\Math.cuh> #include <ATen/native/hip\jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_k0_name[] = "modified_bessel_k0_forward"; void modified_bessel_k0_kernel_hip(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k0_hip", [&]() { jitted_gpu_kernel<modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_k0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k0_hip", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return modified_bessel_k0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_modified_bessel_k0_stub, &modified_bessel_k0_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k0_name[] = "modified_bessel_k0_forward"; void modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k0_cuda", [&]() { jitted_gpu_kernel<modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_k0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return modified_bessel_k0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_modified_bessel_k0_stub, &modified_bessel_k0_kernel_cuda); } // namespace at::native ###
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\JitLoops.cuh> #include <ATen/native/hip\Loops.cuh> #include <ATen/native/hip\Math.cuh> #include <ATen/native/hip\jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_k1_name[] = "modified_bessel_k1_forward"; void modified_bessel_k1_kernel_hip(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_hip", [&]() { jitted_gpu_kernel<modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_k1_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_hip", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return modified_bessel_k1_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_modified_bessel_k1_stub, &modified_bessel_k1_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k1_name[] = "modified_bessel_k1_forward"; void modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_cuda", [&]() { jitted_gpu_kernel<modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_k1_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return modified_bessel_k1_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_modified_bessel_k1_stub, &modified_bessel_k1_kernel_cuda); } // namespace at::native ###
// !!! This is a file automatically generated by hipify!!! #pragma once #include <ATen/native/Pow.h> #include <c10/core/Scalar.h> namespace at { namespace native { namespace { // SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt. // So we need to define the functions with the explicit function signatures. // As for pow, the following signatures are defined as the device function: // pow(float, int) // pow(double, int) // pow(float, float) // pow(double, double) #ifdef _MSC_VER // Functions for pow // pow for at::Half static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) { return static_cast<at::Half>(::pow(static_cast<float>(base), static_cast<float>(exp))); } // pow for at::BFloat16 static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) { return static_cast<at::BFloat16>(::pow(static_cast<float>(base), static_cast<float>(exp))); } // pow (floating, floating/int) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type pow_(Base_type base, Exp_type exp) { return ::pow(base, exp); } // pow (Otherwise) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type pow_(Base_type base, Exp_type exp) { return static_cast<Base_type>(::pow(static_cast<double>(base), static_cast<double>(exp))); } #else template <typename Base_type, typename Exp_type> static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) { return ::pow(base, exp); } #endif template <typename T> static inline __host__ __device__ std::enable_if_t<std::is_integral<T>::value, T> pow_( T base, T exp) { return at::native::powi(base, exp); } template <typename T> static inline __host__ __device__ c10::complex<T> pow_(c10::complex<T> base, c10::complex<T> exp) { return c10_complex_math::pow(base, exp); } } // namespace }} // namespace at::native ###
#pragma once #include <ATen/native/Pow.h> #include <c10/core/Scalar.h> namespace at { namespace native { namespace { // SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt. // So we need to define the functions with the explicit function signatures. // As for pow, the following signatures are defined as the device function: // pow(float, int) // pow(double, int) // pow(float, float) // pow(double, double) #ifdef _MSC_VER // Functions for pow // pow for at::Half static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) { return static_cast<at::Half>(std::pow(static_cast<float>(base), static_cast<float>(exp))); } // pow for at::BFloat16 static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) { return static_cast<at::BFloat16>(std::pow(static_cast<float>(base), static_cast<float>(exp))); } // pow (floating, floating/int) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type pow_(Base_type base, Exp_type exp) { return std::pow(base, exp); } // pow (Otherwise) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type pow_(Base_type base, Exp_type exp) { return static_cast<Base_type>(std::pow(static_cast<double>(base), static_cast<double>(exp))); } #else template <typename Base_type, typename Exp_type> static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) { return ::pow(base, exp); } #endif template <typename T> static inline __host__ __device__ std::enable_if_t<std::is_integral<T>::value, T> pow_( T base, T exp) { return at::native::powi(base, exp); } template <typename T> static inline __host__ __device__ c10::complex<T> pow_(c10::complex<T> base, c10::complex<T> exp) { return c10_complex_math::pow(base, exp); } } // namespace }} // namespace at::native ###
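For integral operands the pow_ overload above calls at::native::powi from ATen/native/Pow.h instead of ::pow, avoiding a round trip through floating point. The sketch below is an illustrative exponentiation-by-squaring routine, assuming that is roughly what powi does for non-negative exponents; it is not the ATen implementation and does not handle negative exponents.

// Illustrative exponentiation by squaring for non-negative integer exponents;
// not the at::native::powi implementation.
template <typename T>
T powi_sketch(T base, T exp) {
  T result = static_cast<T>(1);
  while (exp > 0) {
    if (exp & 1) result *= base;   // fold in the current bit of the exponent
    base *= base;                  // square for the next bit
    exp >>= 1;
  }
  return result;
}
// Example: powi_sketch<long long>(3, 13) == 1594323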
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/hip\HIPGeneratorImpl.h> #include <ATen/hip\HIPGraphsUtils.cuh> #include <ATen/Utils.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> namespace { // See note [Algorithm of randperm] template<typename T, typename scalar_t> __global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T mask, int n, at::PhiloxHipState philox_args) { int tid = threadIdx.x + blockDim.x * blockIdx.x; // find the beginning of islands if (tid >= n - 1) return; // out of range if ((keys[tid] & mask) != (keys[tid + 1] & mask)) return; // not in an island if (tid != 0 && (keys[tid] & mask) == (keys[tid - 1] & mask)) return; // not the beginning of an island // find the size of islands int island_size = 0; do { island_size++; } while ((tid + island_size < n) && (keys[tid + island_size] & mask) == (keys[tid] & mask)); // do random permutation inside each island. data += tid; auto seeds = at::cuda::philox::unpack(philox_args); hiprandStatePhilox4_32_10_t state; hiprand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state); for (int i = island_size - 1; i > 0; i--) { unsigned int r = hiprand(&state) % (i + 1); if (i != r) { scalar_t tmp = data[i]; data[i] = data[r]; data[r] = tmp; } } } // See note [Algorithm of randperm] template<typename T, typename scalar_t> void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional<at::Generator> &gen_) { auto gen = at::get_generator_or_default<at::HIPGeneratorImpl>(gen_, at::cuda::detail::getDefaultHIPGenerator()); int64_t counter_offset = n; at::PhiloxHipState rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_hip_state(counter_offset); } T mask = static_cast<T>((1UL << bits) - 1); hipLaunchKernelGGL(( randperm_handle_duplicate_keys_kernel), dim3((n + 511) / 512), dim3(512), 0, at::hip::getCurrentHIPStream(), keys, data, mask, n, rng_engine_inputs); C10_HIP_KERNEL_LAUNCH_CHECK(); } } ###
#include <ATen/cuda/CUDAGeneratorImpl.h> #include <ATen/cuda/CUDAGraphsUtils.cuh> #include <ATen/Utils.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> namespace { // See note [Algorithm of randperm] template<typename T, typename scalar_t> __global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T mask, int n, at::PhiloxCudaState philox_args) { int tid = threadIdx.x + blockDim.x * blockIdx.x; // find the beginning of islands if (tid >= n - 1) return; // out of range if ((keys[tid] & mask) != (keys[tid + 1] & mask)) return; // not in an island if (tid != 0 && (keys[tid] & mask) == (keys[tid - 1] & mask)) return; // not the beginning of an island // find the size of islands int island_size = 0; do { island_size++; } while ((tid + island_size < n) && (keys[tid + island_size] & mask) == (keys[tid] & mask)); // do random permutation inside each island. data += tid; auto seeds = at::cuda::philox::unpack(philox_args); curandStatePhilox4_32_10_t state; curand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state); for (int i = island_size - 1; i > 0; i--) { unsigned int r = curand(&state) % (i + 1); if (i != r) { scalar_t tmp = data[i]; data[i] = data[r]; data[r] = tmp; } } } // See note [Algorithm of randperm] template<typename T, typename scalar_t> void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional<at::Generator> &gen_) { auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(gen_, at::cuda::detail::getDefaultCUDAGenerator()); int64_t counter_offset = n; at::PhiloxCudaState rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_cuda_state(counter_offset); } T mask = static_cast<T>((1UL << bits) - 1); randperm_handle_duplicate_keys_kernel<<<(n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>( keys, data, mask, n, rng_engine_inputs); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } ###
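The per-island loop in randperm_handle_duplicate_keys_kernel is a Fisher-Yates shuffle over the run of elements whose masked keys collide, with curand/hiprand supplying the random draws (via curand(&state) % (i + 1), which accepts a small modulo bias). A host-side sketch of the same shuffle over a plain array, using the standard library RNG in place of the Philox generator:

#include <cstdint>
#include <random>
#include <utility>
#include <vector>

// Uniform shuffle of data[0 .. island_size), mirroring the device loop.
void shuffle_island(std::vector<int64_t>& data, int island_size, uint64_t seed) {
  std::mt19937_64 rng(seed);
  for (int i = island_size - 1; i > 0; --i) {
    std::uniform_int_distribution<int> pick(0, i);
    const int r = pick(rng);
    if (i != r) std::swap(data[i], data[r]);
  }
}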
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <c10/hip/HIPCachingAllocator.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/record_stream_native.h> #endif namespace at::native { void record_stream_hip(Tensor& self, c10::Stream stream) { struct c10::StreamData3 data = stream.pack3(); c10::hip::HIPCachingAllocator::recordStream(self.storage().data_ptr(), at::hip::HIPStream::unpack3(data.stream_id, data.device_index, data.device_type)); } } // namespace at::native ###
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <c10/cuda/CUDACachingAllocator.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/record_stream_native.h> #endif namespace at::native { void record_stream_cuda(Tensor& self, c10::Stream stream) { struct c10::StreamData3 data = stream.pack3(); c10::cuda::CUDACachingAllocator::recordStream(self.storage().data_ptr(), at::cuda::CUDAStream::unpack3(data.stream_id, data.device_index, data.device_type)); } } // namespace at::native ###
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/hip\Sleep.h> #include <c10/hip/HIPException.h> #include <c10/hip/HIPStream.h> namespace at { namespace hip { namespace { __global__ void spin_kernel(int64_t cycles) { // Few AMD specific GPUs have different clock intrinsic #if defined(__GFX11__) && defined(USE_ROCM) && !defined(__HIP_ARCH__) int64_t start_clock = wall_clock64(); #else // see concurrentKernels HIP sampl int64_t start_clock = clock64(); #endif int64_t clock_offset = 0; while (clock_offset < cycles) { #if defined(__GFX11__) && defined(USE_ROCM) && !defined(__HIP_ARCH__) clock_offset = wall_clock64() - start_clock; #else clock_offset = clock64() - start_clock; #endif } } } void sleep(int64_t cycles) { dim3 grid(1); dim3 block(1); hipLaunchKernelGGL(( spin_kernel), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStream(), cycles); C10_HIP_KERNEL_LAUNCH_CHECK(); } }} // namespace at::cuda ###
#include <ATen/cuda/Sleep.h> #include <c10/cuda/CUDAException.h> #include <c10/cuda/CUDAStream.h> namespace at { namespace cuda { namespace { __global__ void spin_kernel(int64_t cycles) { // Few AMD specific GPUs have different clock intrinsic #if defined(__GFX11__) && defined(USE_ROCM) && !defined(__CUDA_ARCH__) int64_t start_clock = wall_clock64(); #else // see concurrentKernels CUDA sampl int64_t start_clock = clock64(); #endif int64_t clock_offset = 0; while (clock_offset < cycles) { #if defined(__GFX11__) && defined(USE_ROCM) && !defined(__CUDA_ARCH__) clock_offset = wall_clock64() - start_clock; #else clock_offset = clock64() - start_clock; #endif } } } void sleep(int64_t cycles) { dim3 grid(1); dim3 block(1); spin_kernel<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(cycles); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }} // namespace at::cuda ###
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/hip\Reduce.cuh> #include <c10/util/ArrayRef.h> #include <iostream> namespace at::native { static inline std::ostream& operator<<(std::ostream& out, dim3 dim) { if (dim.y == 1 && dim.z == 1) { out << dim.x; } else { out << "[" << dim.x << "," << dim.y << "," << dim.z << "]"; } return out; } std::ostream& operator<<(std::ostream& out, const ReduceConfig& config) { out << "ReduceConfig("; out << "element_size_bytes=" << config.element_size_bytes << ", "; out << "num_inputs=" << config.num_inputs << ", "; out << "num_outputs=" << config.num_outputs << ", "; out << "step_input=" << config.step_input << ", "; out << "step_output=" << config.step_output << ", "; out << "ctas_per_output=" << config.ctas_per_output << ", "; out << "input_mult=["; for (int i = 0; i < 3; i++) { if (i != 0) { out << ","; } out << config.input_mult[i]; } out << "], "; out << "output_mult=["; for (int i = 0; i < 2; i++) { if (i != 0) { out << ","; } out << config.output_mult[i]; } out << "], "; out << "vectorize_input=" << config.vectorize_input << ", "; out << "output_vec_size=" << config.output_vec_size << ", "; out << "block_width=" << config.block_width << ", "; out << "block_height=" << config.block_height << ", "; out << "num_threads=" << config.num_threads << ", "; out << "values_per_thread=" << config.values_per_thread() << ", "; out << "block=" << config.block() << ", "; out << "grid=" << config.grid() << ", "; out << "global_memory_size=" << config.global_memory_size(); out << ")"; return out; } } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/cuda/Reduce.cuh> #include <c10/util/ArrayRef.h> #include <iostream> namespace at::native { static inline std::ostream& operator<<(std::ostream& out, dim3 dim) { if (dim.y == 1 && dim.z == 1) { out << dim.x; } else { out << "[" << dim.x << "," << dim.y << "," << dim.z << "]"; } return out; } std::ostream& operator<<(std::ostream& out, const ReduceConfig& config) { out << "ReduceConfig("; out << "element_size_bytes=" << config.element_size_bytes << ", "; out << "num_inputs=" << config.num_inputs << ", "; out << "num_outputs=" << config.num_outputs << ", "; out << "step_input=" << config.step_input << ", "; out << "step_output=" << config.step_output << ", "; out << "ctas_per_output=" << config.ctas_per_output << ", "; out << "input_mult=["; for (int i = 0; i < 3; i++) { if (i != 0) { out << ","; } out << config.input_mult[i]; } out << "], "; out << "output_mult=["; for (int i = 0; i < 2; i++) { if (i != 0) { out << ","; } out << config.output_mult[i]; } out << "], "; out << "vectorize_input=" << config.vectorize_input << ", "; out << "output_vec_size=" << config.output_vec_size << ", "; out << "block_width=" << config.block_width << ", "; out << "block_height=" << config.block_height << ", "; out << "num_threads=" << config.num_threads << ", "; out << "values_per_thread=" << config.values_per_thread() << ", "; out << "block=" << config.block() << ", "; out << "grid=" << config.grid() << ", "; out << "global_memory_size=" << config.global_memory_size(); out << ")"; return out; } } // namespace at::native ###
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\ReduceOps.h> #include <ATen/hip\NumericLimits.cuh> #include <ATen/native/hip\Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip\NumericLimits.cuh> namespace at::native { template <typename scalar_t> void _min_max_values_kernel_hip_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MinMaxOps<scalar_t, scalar_t, int32_t>{}, thrust::pair<scalar_t, scalar_t>( at::numeric_limits<scalar_t>::upper_bound(), at::numeric_limits<scalar_t>::lower_bound())); } void aminmax_allreduce_launch_kernel(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_all_hip", [&] { _min_max_values_kernel_hip_impl<scalar_t>(iter); }); } void aminmax_launch_kernel(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_hip", [&]() { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MinMaxOps<scalar_t, scalar_t, int32_t>{}, thrust::pair<scalar_t, scalar_t>( at::numeric_limits<scalar_t>::upper_bound(), at::numeric_limits<scalar_t>::lower_bound())); }); } } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/ReduceOps.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/NumericLimits.cuh> namespace at::native { template <typename scalar_t> void _min_max_values_kernel_cuda_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MinMaxOps<scalar_t, scalar_t, int32_t>{}, thrust::pair<scalar_t, scalar_t>( at::numeric_limits<scalar_t>::upper_bound(), at::numeric_limits<scalar_t>::lower_bound())); } void aminmax_allreduce_launch_kernel(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_all_cuda", [&] { _min_max_values_kernel_cuda_impl<scalar_t>(iter); }); } void aminmax_launch_kernel(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_cuda", [&]() { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MinMaxOps<scalar_t, scalar_t, int32_t>{}, thrust::pair<scalar_t, scalar_t>( at::numeric_limits<scalar_t>::upper_bound(), at::numeric_limits<scalar_t>::lower_bound())); }); } } // namespace at::native ###
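MinMaxOps folds every element into a (min, max) pair whose identity is the accumulator type's upper and lower bound, so both extrema come out of a single pass over the data. A sequential sketch of that accumulation, with std::numeric_limits max/lowest standing in for at::numeric_limits upper_bound/lower_bound:

#include <limits>
#include <utility>
#include <vector>

// One pass, both extrema; the identity pair matches the reduction's init.
template <typename T>
std::pair<T, T> aminmax_sketch(const std::vector<T>& values) {
  std::pair<T, T> acc{std::numeric_limits<T>::max(),      // running min
                      std::numeric_limits<T>::lowest()};  // running max
  for (const T& v : values) {
    if (v < acc.first) acc.first = v;
    if (v > acc.second) acc.second = v;
  }
  return acc;
}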
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\ReduceOps.h> #include <ATen/hip\NumericLimits.cuh> #include <ATen/native/hip\Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip\NumericLimits.cuh> namespace at::native { template <typename scalar_t, typename acc_t = scalar_t> void argmax_kernel_hip_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, int64_t>( iter, ArgMaxOps<acc_t>{}, thrust::pair<acc_t, int64_t>( at::numeric_limits<acc_t>::lower_bound(), 0)); }; void argmax_kernel_hip(TensorIterator& iter) { // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down, // we can convert float16 & bfloat16 to float and do all the operations in // float. if (iter.dtype(1) == kHalf) { argmax_kernel_hip_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kBFloat16) { argmax_kernel_hip_impl<at::BFloat16, float>(iter); } else { AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmax_hip", [&]() { argmax_kernel_hip_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(argmax_stub, &argmax_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/ReduceOps.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/NumericLimits.cuh> namespace at::native { template <typename scalar_t, typename acc_t = scalar_t> void argmax_kernel_cuda_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, int64_t>( iter, ArgMaxOps<acc_t>{}, thrust::pair<acc_t, int64_t>( at::numeric_limits<acc_t>::lower_bound(), 0)); }; void argmax_kernel_cuda(TensorIterator& iter) { // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down, // we can convert float16 & bfloat16 to float and do all the operations in // float. if (iter.dtype(1) == kHalf) { argmax_kernel_cuda_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kBFloat16) { argmax_kernel_cuda_impl<at::BFloat16, float>(iter); } else { AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmax_cuda", [&]() { argmax_kernel_cuda_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(argmax_stub, &argmax_kernel_cuda); } // namespace at::native ###
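argmax is phrased as a reduction over (value, index) pairs starting from (lowest value, 0), and the half/bfloat16 branches accumulate in float so the NaN check and warp shuffle only need to exist for float. A sequential sketch of the pair reduction; NaN propagation, which the device ops handle through at::_isnan, is omitted here:

#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

// Reduce to the (max value, its first index) pair; identity is (lowest, 0).
template <typename T>
std::pair<T, int64_t> argmax_sketch(const std::vector<T>& values) {
  std::pair<T, int64_t> acc{std::numeric_limits<T>::lowest(), 0};
  for (int64_t i = 0; i < static_cast<int64_t>(values.size()); ++i) {
    if (values[i] > acc.first) acc = {values[i], i};
  }
  return acc;
}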
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\ReduceOps.h> #include <ATen/hip\NumericLimits.cuh> #include <ATen/native/hip\Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip\NumericLimits.cuh> namespace at::native { template <typename scalar_t, typename acc_t = scalar_t> void argmin_kernel_hip_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, int64_t>( iter, ArgMinOps<acc_t>{}, thrust::pair<acc_t, int64_t>( at::numeric_limits<acc_t>::upper_bound(), 0)); }; void argmin_kernel_hip(TensorIterator& iter) { // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down, // we can convert float16 & bfloat16 to float and do all the operations in // float. if (iter.dtype(1) == kHalf) { argmin_kernel_hip_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kBFloat16) { argmin_kernel_hip_impl<at::BFloat16, float>(iter); } else { AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmin_hip", [&]() { argmin_kernel_hip_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(argmin_stub, &argmin_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/ReduceOps.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/NumericLimits.cuh> namespace at::native { template <typename scalar_t, typename acc_t = scalar_t> void argmin_kernel_cuda_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, int64_t>( iter, ArgMinOps<acc_t>{}, thrust::pair<acc_t, int64_t>( at::numeric_limits<acc_t>::upper_bound(), 0)); }; void argmin_kernel_cuda(TensorIterator& iter) { // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down, // we can convert float16 & bfloat16 to float and do all the operations in // float. if (iter.dtype(1) == kHalf) { argmin_kernel_cuda_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kBFloat16) { argmin_kernel_cuda_impl<at::BFloat16, float>(iter); } else { AT_DISPATCH_ALL_TYPES(iter.dtype(1), "argmin_cuda", [&]() { argmin_kernel_cuda_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(argmin_stub, &argmin_kernel_cuda); } // namespace at::native ###
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/Dispatch.h> namespace at::native { void and_kernel_hip(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBFloat16, kBool, iter.common_dtype(), "and_hip", [&]() { gpu_reduce_kernel<scalar_t, bool>( iter, func_wrapper<bool>([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return (static_cast<bool>(a) && static_cast<bool>(b)); }), true); }); } void or_kernel_hip(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBFloat16, kBool, iter.common_dtype(), "or_hip", [&]() { gpu_reduce_kernel<scalar_t, bool>( iter, func_wrapper<bool>([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return (static_cast<bool>(a) || static_cast<bool>(b)); }), false); }); } REGISTER_DISPATCH(and_stub, &and_kernel_hip); REGISTER_DISPATCH(or_stub, &or_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/Dispatch.h> namespace at::native { void and_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBFloat16, kBool, iter.common_dtype(), "and_cuda", [&]() { gpu_reduce_kernel<scalar_t, bool>( iter, func_wrapper<bool>([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return (static_cast<bool>(a) && static_cast<bool>(b)); }), true); }); } void or_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBFloat16, kBool, iter.common_dtype(), "or_cuda", [&]() { gpu_reduce_kernel<scalar_t, bool>( iter, func_wrapper<bool>([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return (static_cast<bool>(a) || static_cast<bool>(b)); }), false); }); } REGISTER_DISPATCH(and_stub, &and_kernel_cuda); REGISTER_DISPATCH(or_stub, &or_kernel_cuda); } // namespace at::native ###
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\ReduceOps.h> #include <ATen/hip\NumericLimits.cuh> #include <ATen/native/hip\Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip\NumericLimits.cuh> namespace at::native { template <typename acc_t> struct MaxNanFunctor { __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const { return (at::_isnan(a) || a > b) ? a : b; } }; template <typename scalar_t, typename acc_t = scalar_t> void max_values_kernel_hip_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, scalar_t>( iter, func_wrapper<acc_t>(MaxNanFunctor<acc_t>()), at::numeric_limits<acc_t>::lower_bound()); } void max_values_kernel_hip(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.dtype(), "max_values_hip", [&]() { max_values_kernel_hip_impl<scalar_t>(iter); }); } void max_launch_kernel(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.input_dtype(), "max_hip", [&]() { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MaxOps<scalar_t>{}, thrust::pair<scalar_t, int64_t>( at::numeric_limits<scalar_t>::lower_bound(), 0)); }); } void max_all_launch_kernel(TensorIterator &iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "max_all_hip", [&] { max_values_kernel_hip_impl<scalar_t>(iter); }); } REGISTER_DISPATCH(max_values_stub, &max_values_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/ReduceOps.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/NumericLimits.cuh> namespace at::native { template <typename acc_t> struct MaxNanFunctor { __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const { return (at::_isnan(a) || a > b) ? a : b; } }; template <typename scalar_t, typename acc_t = scalar_t> void max_values_kernel_cuda_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, scalar_t>( iter, func_wrapper<acc_t>(MaxNanFunctor<acc_t>()), at::numeric_limits<acc_t>::lower_bound()); } void max_values_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cuda", [&]() { max_values_kernel_cuda_impl<scalar_t>(iter); }); } void max_launch_kernel(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3( kBFloat16, kHalf, kBool, iter.input_dtype(), "max_cuda", [&]() { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MaxOps<scalar_t>{}, thrust::pair<scalar_t, int64_t>( at::numeric_limits<scalar_t>::lower_bound(), 0)); }); } void max_all_launch_kernel(TensorIterator &iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "max_all_cuda", [&] { max_values_kernel_cuda_impl<scalar_t>(iter); }); } REGISTER_DISPATCH(max_values_stub, &max_values_kernel_cuda); } // namespace at::native ###
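MaxNanFunctor makes the reduction NaN-propagating: if either operand is NaN the NaN wins, so a single NaN in the input reaches the output instead of being skipped. The same predicate in host form:

#include <cmath>

// NaN-propagating max, mirroring MaxNanFunctor: NaN in either slot wins.
template <typename T>
T max_propagate_nan(T a, T b) {
  return (std::isnan(a) || a > b) ? a : b;
}
// max_propagate_nan(1.0, NAN): isnan(a) is false and 1.0 > NaN is false,
// so b (NaN) is returned; max_propagate_nan(NAN, 1.0) returns a (NaN).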
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\Reduce.cuh> #include <ATen/native/hip\ReduceOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/Dispatch.h> #include <ATen/hip\NumericLimits.cuh> #include <ATen/native/ReduceOps.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/NumericUtils.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/hip\NumericLimits.cuh> namespace at::native { template <typename acc_t> struct MinNanFunctor { __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const { return (at::_isnan(a) || a < b) ? a : b; } }; template <typename scalar_t, typename acc_t=scalar_t> void min_values_kernel_hip_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, scalar_t>( iter, func_wrapper<acc_t> (MinNanFunctor<acc_t>()), at::numeric_limits<acc_t>::upper_bound()); } void min_values_kernel_hip(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_hip", [&]() { min_values_kernel_hip_impl<scalar_t>(iter); }); } void min_launch_kernel(TensorIterator &iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_hip", [&]() { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MinOps<scalar_t>{}, thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0)); }); } void min_all_launch_kernel(TensorIterator &iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_all_hip", [&] { min_values_kernel_hip_impl<scalar_t>(iter); }); } REGISTER_DISPATCH(min_values_stub, &min_values_kernel_hip); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/cuda/ReduceOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/Dispatch.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/native/ReduceOps.h> #include <ATen/native/ReduceAllOps.h> #include <ATen/native/TensorCompare.h> #include <ATen/NumericUtils.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/NumericLimits.cuh> namespace at::native { template <typename acc_t> struct MinNanFunctor { __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const { return (at::_isnan(a) || a < b) ? a : b; } }; template <typename scalar_t, typename acc_t=scalar_t> void min_values_kernel_cuda_impl(TensorIterator& iter) { gpu_reduce_kernel<scalar_t, scalar_t>( iter, func_wrapper<acc_t> (MinNanFunctor<acc_t>()), at::numeric_limits<acc_t>::upper_bound()); } void min_values_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() { min_values_kernel_cuda_impl<scalar_t>(iter); }); } void min_launch_kernel(TensorIterator &iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_cuda", [&]() { gpu_reduce_kernel<scalar_t, scalar_t>( iter, MinOps<scalar_t>{}, thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0)); }); } void min_all_launch_kernel(TensorIterator &iter) { AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_all_cuda", [&] { min_values_kernel_cuda_impl<scalar_t>(iter); }); } REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> namespace at::native { template <typename scalar_t, typename out_t=scalar_t> void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) { using accscalar_t = at::acc_type<scalar_t, true>; using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, thrust::pair<out_t, out_t>>; ops_t ops(static_cast<accscalar_t>(correction), take_sqrt); gpu_reduce_kernel<scalar_t, out_t, 2>(iter, ops, typename ops_t::acc_t{}); } static void std_var_kernel_hip(TensorIterator& iter, double correction, bool take_sqrt) { const auto input_dtype = iter.input_dtype(); if (input_dtype == kHalf && iter.dtype() == kFloat) { std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt); } else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) { std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "std_hip", [&]() { std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt); }); } } template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t> void mean_kernel_impl(TensorIterator& iter) { using factor_t = typename c10::scalar_value_type<acc_t>::type; factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel(); gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<scalar_t, acc_t, factor_t, out_t> {factor}); } static void mean_kernel_hip(TensorIterator& iter) { if (iter.dtype() == kHalf) { mean_kernel_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { mean_kernel_impl<at::Half, float, float>(iter); } else if(iter.dtype() == kBFloat16) { mean_kernel_impl<at::BFloat16, float>(iter); } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { mean_kernel_impl<at::BFloat16, float, float>(iter); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_hip", [&]() { mean_kernel_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(std_var_stub, &std_var_kernel_hip); REGISTER_DISPATCH(mean_stub, &mean_kernel_hip); } ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> namespace at::native { template <typename scalar_t, typename out_t=scalar_t> void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) { using accscalar_t = at::acc_type<scalar_t, true>; using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, thrust::pair<out_t, out_t>>; ops_t ops(static_cast<accscalar_t>(correction), take_sqrt); gpu_reduce_kernel<scalar_t, out_t, 2>(iter, ops, typename ops_t::acc_t{}); } static void std_var_kernel_cuda(TensorIterator& iter, double correction, bool take_sqrt) { const auto input_dtype = iter.input_dtype(); if (input_dtype == kHalf && iter.dtype() == kFloat) { std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt); } else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) { std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "std_cuda", [&]() { std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt); }); } } template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t> void mean_kernel_impl(TensorIterator& iter) { using factor_t = typename c10::scalar_value_type<acc_t>::type; factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel(); gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<scalar_t, acc_t, factor_t, out_t> {factor}); } static void mean_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == kHalf) { mean_kernel_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { mean_kernel_impl<at::Half, float, float>(iter); } else if(iter.dtype() == kBFloat16) { mean_kernel_impl<at::BFloat16, float>(iter); } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { mean_kernel_impl<at::BFloat16, float, float>(iter); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() { mean_kernel_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda); REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda); } ###
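WelfordOps computes mean and variance in a single pass using Welford's update, with correction entering the final divisor (n - correction in the sketch below) and take_sqrt selecting the standard deviation over the variance. A sequential sketch of the update rule, illustrative rather than the ATen accumulator:

#include <cmath>
#include <cstdint>
#include <vector>

// Single-pass mean/variance via Welford's update. correction = 0 gives the
// population variance, correction = 1 the sample variance; take_sqrt returns
// the standard deviation instead.
double welford_std_var(const std::vector<double>& xs, double correction, bool take_sqrt) {
  double mean = 0.0;
  double m2 = 0.0;    // sum of squared deviations from the running mean
  int64_t n = 0;
  for (double x : xs) {
    ++n;
    const double delta = x - mean;
    mean += delta / static_cast<double>(n);
    m2 += delta * (x - mean);        // uses the *updated* mean
  }
  const double var = m2 / (static_cast<double>(n) - correction);
  return take_sqrt ? std::sqrt(var) : var;
}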
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/TensorIterator.h> #include <ATen/native/hip\Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/LinearAlgebra.h> #include <c10/core/Scalar.h> namespace at::native { // This reduction accumulates results as the type `acc_t`. By default, when // `scalar_t` is complex, `acc_t` is the downgraded real number type. // Otherwise, `acc_t` and `scalar_t` are the same type. template <typename scalar_t, typename acc_t=typename scalar_value_type<scalar_t>::type, typename out_t=typename scalar_value_type<scalar_t>::type> void norm_kernel_hip_impl(TensorIterator& iter, double p) { if (p == static_cast<double>(0)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(1)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(2)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(-INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<scalar_t, acc_t, out_t>(), std::numeric_limits<acc_t>::infinity()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<scalar_t, acc_t, out_t>{acc_t(p)}, 0); } } void norm_launch_kernel(TensorIterator& iter, double ord) { if (iter.dtype(0) == kHalf) { return norm_kernel_hip_impl<at::Half, float>(iter, ord); } else if (iter.input_dtype() == kHalf && iter.dtype(0) == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_hip_impl<at::Half, float, float>(iter, ord); } else if(iter.dtype(0) == kBFloat16) { return norm_kernel_hip_impl<at::BFloat16, float>(iter, ord); } else if (iter.input_dtype() == kBFloat16 && iter.dtype(0) == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_hip_impl<at::BFloat16, float, float>(iter, ord); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "norm_hip", [&] { norm_kernel_hip_impl<scalar_t>(iter, ord); }); } } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/LinearAlgebra.h> #include <c10/core/Scalar.h> namespace at::native { // This reduction accumulates results as the type `acc_t`. By default, when // `scalar_t` is complex, `acc_t` is the downgraded real number type. // Otherwise, `acc_t` and `scalar_t` are the same type. template <typename scalar_t, typename acc_t=typename scalar_value_type<scalar_t>::type, typename out_t=typename scalar_value_type<scalar_t>::type> void norm_kernel_cuda_impl(TensorIterator& iter, double p) { if (p == static_cast<double>(0)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(1)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(2)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<scalar_t, acc_t, out_t>(), 0); } else if (p == static_cast<double>(-INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<scalar_t, acc_t, out_t>(), std::numeric_limits<acc_t>::infinity()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<scalar_t, acc_t, out_t>{acc_t(p)}, 0); } } void norm_launch_kernel(TensorIterator& iter, double ord) { if (iter.dtype(0) == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, ord); } else if (iter.input_dtype() == kHalf && iter.dtype(0) == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, ord); } else if(iter.dtype(0) == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, ord); } else if (iter.input_dtype() == kBFloat16 && iter.dtype(0) == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, ord); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "norm_cuda", [&] { norm_kernel_cuda_impl<scalar_t>(iter, ord); }); } } // namespace at::native ###
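norm_kernel_*_impl special-cases p = 0 (count of non-zeros), p = 1, p = 2, and p = ±infinity (max or min absolute value), and otherwise reduces sum(|x|^p)^(1/p). A host sketch of the same dispatch; here p = 1 and p = 2 simply fall through to the general formula:

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

double vector_norm(const std::vector<double>& xs, double p) {
  if (p == 0.0) {                                   // count of non-zero entries
    double count = 0.0;
    for (double x : xs) count += (x != 0.0) ? 1.0 : 0.0;
    return count;
  }
  if (std::isinf(p)) {                              // +inf: max |x|, -inf: min |x|
    double acc = (p > 0.0) ? 0.0 : std::numeric_limits<double>::infinity();
    for (double x : xs) {
      acc = (p > 0.0) ? std::max(acc, std::fabs(x)) : std::min(acc, std::fabs(x));
    }
    return acc;
  }
  double sum = 0.0;                                 // general case, also p = 1, 2
  for (double x : xs) sum += std::pow(std::fabs(x), p);
  return std::pow(sum, 1.0 / p);
}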
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/Normalization.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip\Loops.cuh> #include <ATen/Dispatch.h> namespace at::native { namespace { void renorm_scale_factor_impl(TensorIteratorBase& iter, double maxnorm) { AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "renorm_scale_factor_cpu", [&] { const auto maxnorm_s = static_cast<scalar_t>(maxnorm); gpu_kernel( iter, [maxnorm_s] GPU_LAMBDA (scalar_t norm) -> scalar_t { const auto eps = static_cast<scalar_t>(1e-7); const auto one = static_cast<scalar_t>(1.0); return (norm > maxnorm_s) ? maxnorm_s / (norm + eps) : one; }); }); } } // namespace (anonymous) REGISTER_DISPATCH(renorm_scale_factor_stub, &renorm_scale_factor_impl); } // namespace at::native ###
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/Normalization.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/Dispatch.h> namespace at::native { namespace { void renorm_scale_factor_impl(TensorIteratorBase& iter, double maxnorm) { AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "renorm_scale_factor_cpu", [&] { const auto maxnorm_s = static_cast<scalar_t>(maxnorm); gpu_kernel( iter, [maxnorm_s] GPU_LAMBDA (scalar_t norm) -> scalar_t { const auto eps = static_cast<scalar_t>(1e-7); const auto one = static_cast<scalar_t>(1.0); return (norm > maxnorm_s) ? maxnorm_s / (norm + eps) : one; }); }); } } // namespace (anonymous) REGISTER_DISPATCH(renorm_scale_factor_stub, &renorm_scale_factor_impl); } // namespace at::native ###
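The lambda above computes the per-norm scale factor that renorm applies: maxnorm / (norm + eps) when the norm exceeds maxnorm, and 1 otherwise (the dispatch label still reads "renorm_scale_factor_cpu" in both rows). In scalar form:

// Per-norm scale factor: shrink when the norm exceeds maxnorm, else leave as is.
template <typename T>
T renorm_scale_factor(T norm, T maxnorm) {
  const T eps = static_cast<T>(1e-7);
  return (norm > maxnorm) ? maxnorm / (norm + eps) : static_cast<T>(1);
}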
// !!! This is a file automatically generated by hipify!!! #include <ATen/hip/detail\IndexUtils.cuh> #include <vector> namespace at { namespace hip { namespace detail { struct SizeAndStride { int64_t size; int64_t stride; }; /* A comparator that will sort SizeAndStride structs by stride, in ascending order. */ int compareSizeAndStride(const void* a, const void* b) { const SizeAndStride* aS = (const SizeAndStride*) a; const SizeAndStride* bS = (const SizeAndStride*) b; if (aS->stride < bS->stride) return -1; if (aS->stride == bS->stride) return 0; return 1; } /* Returns false if there is no possibility that the tensor has "overlapping" indices and true otherwise. "Overlapping" indices are two+ valid indices that specify the same offset within the tensor. The function does this by checking for a sufficient but not necessary condition of no overlap. In particular, that that there exists an ordering of the tensor's dimensions that is nicely "nested," with each dimension contained within the next one. */ bool maybeOverlappingIndices(const TensorBase& t) { /* Extract size/stride arrays; only consider size >1 dims. */ std::vector<SizeAndStride> info(t.dim()); int dims = t.dim(); int nonSize1Dims = 0; for (int i = 0; i < dims; ++i) { int64_t size = t.size(i); if (size > 1) { info[nonSize1Dims].size = size; info[nonSize1Dims].stride = t.stride(i); if (info[nonSize1Dims].stride < 1) { return true; } ++nonSize1Dims; } } // Short-circuits if tensor is a single element. if (nonSize1Dims == 0) { return false; } /* Ascending order (innermost dimension in sorted view is at [0]) */ qsort(info.data(), nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride); for (int i = 0; i < (nonSize1Dims - 1); ++i) { if (((info[i].size - 1) * info[i].stride) >= info[i + 1].stride) { return true; } } return false; } } // detail } // cuda } // at ###
#include <ATen/cuda/detail/IndexUtils.cuh> #include <vector> namespace at { namespace cuda { namespace detail { struct SizeAndStride { int64_t size; int64_t stride; }; /* A comparator that will sort SizeAndStride structs by stride, in ascending order. */ int compareSizeAndStride(const void* a, const void* b) { const SizeAndStride* aS = (const SizeAndStride*) a; const SizeAndStride* bS = (const SizeAndStride*) b; if (aS->stride < bS->stride) return -1; if (aS->stride == bS->stride) return 0; return 1; } /* Returns false if there is no possibility that the tensor has "overlapping" indices and true otherwise. "Overlapping" indices are two+ valid indices that specify the same offset within the tensor. The function does this by checking for a sufficient but not necessary condition of no overlap. In particular, that that there exists an ordering of the tensor's dimensions that is nicely "nested," with each dimension contained within the next one. */ bool maybeOverlappingIndices(const TensorBase& t) { /* Extract size/stride arrays; only consider size >1 dims. */ std::vector<SizeAndStride> info(t.dim()); int dims = t.dim(); int nonSize1Dims = 0; for (int i = 0; i < dims; ++i) { int64_t size = t.size(i); if (size > 1) { info[nonSize1Dims].size = size; info[nonSize1Dims].stride = t.stride(i); if (info[nonSize1Dims].stride < 1) { return true; } ++nonSize1Dims; } } // Short-circuits if tensor is a single element. if (nonSize1Dims == 0) { return false; } /* Ascending order (innermost dimension in sorted view is at [0]) */ qsort(info.data(), nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride); for (int i = 0; i < (nonSize1Dims - 1); ++i) { if (((info[i].size - 1) * info[i].stride) >= info[i + 1].stride) { return true; } } return false; } } // detail } // cuda } // at ###
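maybeOverlappingIndices keeps only the size > 1 dimensions, sorts them by stride, and requires each dimension's extent (size - 1) * stride to stay strictly below the next larger stride; if every dimension nests this way, no two index tuples can share an offset. A host sketch over plain size/stride vectors:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// True if the size/stride description *may* contain overlapping indices
// (same sufficient-but-not-necessary test as maybeOverlappingIndices).
bool maybe_overlapping(const std::vector<int64_t>& sizes,
                       const std::vector<int64_t>& strides) {
  std::vector<std::pair<int64_t, int64_t>> dims;  // (stride, size), size > 1 only
  for (std::size_t i = 0; i < sizes.size(); ++i) {
    if (sizes[i] > 1) {
      if (strides[i] < 1) return true;
      dims.emplace_back(strides[i], sizes[i]);
    }
  }
  if (dims.empty()) return false;                 // at most one element
  std::sort(dims.begin(), dims.end());            // ascending stride
  for (std::size_t i = 0; i + 1 < dims.size(); ++i) {
    if ((dims[i].second - 1) * dims[i].first >= dims[i + 1].first) return true;
  }
  return false;
}
// sizes {2, 3}, strides {1, 1} -> true; sizes {2, 3}, strides {3, 1} -> false.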
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once namespace onnxruntime { namespace rocm { struct CtxAlpha { float alpha; }; struct CtxAlphaBeta { float alpha; float beta; }; struct CtxAlphaGamma { float alpha; float gamma; }; struct CtxNull { }; typedef CtxAlpha CtxElu; typedef CtxAlphaBeta CtxHardSigmoid; typedef CtxAlpha CtxLeakyRelu; typedef CtxNull CtxRelu; typedef CtxAlphaGamma CtxSelu; typedef CtxNull CtxSigmoid; typedef CtxNull CtxSoftplus; typedef CtxNull CtxSoftsign; typedef CtxNull CtxTanh; typedef CtxAlpha CtxThresholdedRelu; #define UNARY_ACTIVATION_OPS() \ UNARY_ACTIVATION_OP_NAME(Elu) \ UNARY_ACTIVATION_OP_NAME(HardSigmoid) \ UNARY_ACTIVATION_OP_NAME(LeakyRelu) \ UNARY_ACTIVATION_OP_NAME(Relu) \ UNARY_ACTIVATION_OP_NAME(Selu) \ UNARY_ACTIVATION_OP_NAME(Sigmoid) \ UNARY_ACTIVATION_OP_NAME(Softplus) \ UNARY_ACTIVATION_OP_NAME(Softsign) \ UNARY_ACTIVATION_OP_NAME(Tanh) \ UNARY_ACTIVATION_OP_NAME(ThresholdedRelu) #define UNARY_ACTIVATION_IMPL_DECLARATION(name) \ template <typename T> \ void Impl_##name( \ hipStream_t stream, \ const T* input_data, \ T* output_data, \ const Ctx##name* func_ctx, \ size_t count) #define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name); UNARY_ACTIVATION_OPS() #undef UNARY_ACTIVATION_OP_NAME } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once namespace onnxruntime { namespace cuda { struct CtxAlpha { float alpha; }; struct CtxAlphaBeta { float alpha; float beta; }; struct CtxAlphaGamma { float alpha; float gamma; }; struct CtxNull { }; typedef CtxAlpha CtxElu; typedef CtxAlphaBeta CtxHardSigmoid; typedef CtxAlpha CtxLeakyRelu; typedef CtxNull CtxRelu; typedef CtxAlphaGamma CtxSelu; typedef CtxNull CtxSigmoid; typedef CtxNull CtxSoftplus; typedef CtxNull CtxSoftsign; typedef CtxNull CtxTanh; typedef CtxAlpha CtxThresholdedRelu; #define UNARY_ACTIVATION_OPS() \ UNARY_ACTIVATION_OP_NAME(Elu) \ UNARY_ACTIVATION_OP_NAME(HardSigmoid) \ UNARY_ACTIVATION_OP_NAME(LeakyRelu) \ UNARY_ACTIVATION_OP_NAME(Relu) \ UNARY_ACTIVATION_OP_NAME(Selu) \ UNARY_ACTIVATION_OP_NAME(Sigmoid) \ UNARY_ACTIVATION_OP_NAME(Softplus) \ UNARY_ACTIVATION_OP_NAME(Softsign) \ UNARY_ACTIVATION_OP_NAME(Tanh) \ UNARY_ACTIVATION_OP_NAME(ThresholdedRelu) #define UNARY_ACTIVATION_IMPL_DECLARATION(name) \ template <typename T> \ void Impl_##name( \ cudaStream_t stream, \ const T* input_data, \ T* output_data, \ const Ctx##name* func_ctx, \ size_t count) #define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name); UNARY_ACTIVATION_OPS() #undef UNARY_ACTIVATION_OP_NAME } // namespace cuda } // namespace onnxruntime ###
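UNARY_ACTIVATION_OPS together with UNARY_ACTIVATION_OP_NAME is the X-macro pattern: the operator list is written once and each consumer redefines UNARY_ACTIVATION_OP_NAME to stamp out per-op code, here one Impl_<name> declaration per activation. A minimal standalone sketch of the pattern, with a hypothetical two-entry list rather than the onnxruntime one:

#include <cstddef>

// The operator list is written once...
#define OPS() \
  OP_NAME(Relu) \
  OP_NAME(Sigmoid)

// ...and each expansion site decides what to generate per entry.
#define OP_NAME(name) void Impl_##name(const float* in, float* out, std::size_t n);
OPS()  // declares Impl_Relu and Impl_Sigmoid
#undef OP_NAME

#define OP_NAME(name) #name,
static const char* kOpNames[] = {OPS()};  // {"Relu", "Sigmoid"}
#undef OP_NAME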
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/shared_library/provider_api.h" #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace rocm { class ScatterElements final : public RocmKernel { public: ScatterElements(const OpKernelInfo& info) : RocmKernel(info) { ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value"); } ~ScatterElements() = default; Status ComputeInternal(OpKernelContext* context) const override; private: template <typename T> struct ComputeImpl; int64_t axis_; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/shared_library/provider_api.h" #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace cuda { class ScatterElements final : public CudaKernel { public: ScatterElements(const OpKernelInfo& info) : CudaKernel(info) { ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value"); } ~ScatterElements() = default; Status ComputeInternal(OpKernelContext* context) const override; private: template <typename T> struct ComputeImpl; int64_t axis_; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { struct GatherScatterElementsArgs; template <typename T, typename TIndex> Status ScatterElementsImpl(hipStream_t stream, const T* input_data, const TIndex* indices_data, const T* updates_data, T* output_data, const GatherScatterElementsArgs& args); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { struct GatherScatterElementsArgs; template <typename T, typename TIndex> Status ScatterElementsImpl(cudaStream_t stream, const T* input_data, const TIndex* indices_data, const T* updates_data, T* output_data, const GatherScatterElementsArgs& args); } // namespace cuda } // namespace onnxruntime ###
#include "core/providers/rocm/tensor/scatter_nd.h" #include "core/providers/rocm/tensor/scatter_nd_impl.h" #include "core/providers/rocm/shared_inc/rocm_utils.h" #include "core/providers/cpu/tensor/utils.h" namespace onnxruntime { namespace rocm { ONNX_OPERATOR_VERSIONED_KERNEL_EX(ScatterND, kOnnxDomain, 11, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .MayInplace(0, 0), ScatterND); ONNX_OPERATOR_KERNEL_EX(ScatterND, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .MayInplace(0, 0), ScatterND); Status ScatterND::ComputeInternal(OpKernelContext* context) const { const auto* input_tensor = context->Input<Tensor>(0); const auto* indices_tensor = context->Input<Tensor>(1); const auto* updates_tensor = context->Input<Tensor>(2); const auto& input_shape = input_tensor->Shape(); const auto& indices_shape = indices_tensor->Shape(); const auto& updates_shape = updates_tensor->Shape(); ORT_RETURN_IF_ERROR(onnxruntime::ScatterND::ValidateShapes(input_shape, indices_shape, updates_shape)); auto* output_tensor = context->Output(0, input_shape); const void* input_data = input_tensor->DataRaw(); void* output_data = output_tensor->MutableDataRaw(); size_t element_size = input_tensor->DataType()->Size(); if (input_data != output_data) { HIP_RETURN_IF_ERROR( hipMemcpyAsync(output_data, input_data, input_tensor->SizeInBytes(), hipMemcpyDeviceToDevice, Stream(context))); } if (indices_shape.Size() == 0) { return Status::OK(); } auto last_index_dimension = indices_shape[indices_shape.NumDimensions() - 1]; TensorPitches input_strides(input_shape); std::vector<int64_t> element_counts_and_input_dims(last_index_dimension * 2, 0LL); for (int64_t i = 0; i < last_index_dimension; ++i) { element_counts_and_input_dims[i] = input_strides[i]; element_counts_and_input_dims[i + last_index_dimension] = input_shape[i]; } RocmAsyncBuffer<int64_t> element_counts_and_input_dims_gpu(this, element_counts_and_input_dims); ORT_RETURN_IF_ERROR(element_counts_and_input_dims_gpu.CopyToGpu(context->GetComputeStream())); ORT_RETURN_IF_ERROR(ScatterNDImpl( Stream(context), output_data, element_size, indices_shape.Size() / static_cast<size_t>(last_index_dimension), indices_tensor->Data<int64_t>(), last_index_dimension, element_counts_and_input_dims_gpu.GpuPtr(), updates_tensor->DataRaw(), input_shape.SizeFromDimension(last_index_dimension))); return Status::OK(); } } } ###
#include "core/providers/cuda/tensor/scatter_nd.h" #include "core/providers/cuda/tensor/scatter_nd_impl.h" #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cpu/tensor/utils.h" namespace onnxruntime { namespace cuda { ONNX_OPERATOR_VERSIONED_KERNEL_EX(ScatterND, kOnnxDomain, 11, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .MayInplace(0, 0), ScatterND); ONNX_OPERATOR_KERNEL_EX(ScatterND, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .MayInplace(0, 0), ScatterND); Status ScatterND::ComputeInternal(OpKernelContext* context) const { const auto* input_tensor = context->Input<Tensor>(0); const auto* indices_tensor = context->Input<Tensor>(1); const auto* updates_tensor = context->Input<Tensor>(2); const auto& input_shape = input_tensor->Shape(); const auto& indices_shape = indices_tensor->Shape(); const auto& updates_shape = updates_tensor->Shape(); ORT_RETURN_IF_ERROR(onnxruntime::ScatterND::ValidateShapes(input_shape, indices_shape, updates_shape)); auto* output_tensor = context->Output(0, input_shape); const void* input_data = input_tensor->DataRaw(); void* output_data = output_tensor->MutableDataRaw(); size_t element_size = input_tensor->DataType()->Size(); if (input_data != output_data) { CUDA_RETURN_IF_ERROR( cudaMemcpyAsync(output_data, input_data, input_tensor->SizeInBytes(), cudaMemcpyDeviceToDevice, Stream(context))); } if (indices_shape.Size() == 0) { return Status::OK(); } auto last_index_dimension = indices_shape[indices_shape.NumDimensions() - 1]; TensorPitches input_strides(input_shape); std::vector<int64_t> element_counts_and_input_dims(last_index_dimension * 2, 0LL); for (int64_t i = 0; i < last_index_dimension; ++i) { element_counts_and_input_dims[i] = input_strides[i]; element_counts_and_input_dims[i + last_index_dimension] = input_shape[i]; } CudaAsyncBuffer<int64_t> element_counts_and_input_dims_gpu(this, element_counts_and_input_dims); ORT_RETURN_IF_ERROR(element_counts_and_input_dims_gpu.CopyToGpu(context->GetComputeStream())); ORT_RETURN_IF_ERROR(ScatterNDImpl( Stream(context), output_data, element_size, indices_shape.Size() / static_cast<size_t>(last_index_dimension), indices_tensor->Data<int64_t>(), last_index_dimension, element_counts_and_input_dims_gpu.GpuPtr(), updates_tensor->DataRaw(), input_shape.SizeFromDimension(last_index_dimension))); return Status::OK(); } } } ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/shared_library/provider_api.h" #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/scatter_nd.h" namespace onnxruntime { namespace rocm { class ScatterND final : public RocmKernel { public: explicit ScatterND(const OpKernelInfo& info) : RocmKernel(info) {} Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/shared_library/provider_api.h" #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/scatter_nd.h" namespace onnxruntime { namespace cuda { class ScatterND final : public CudaKernel { public: explicit ScatterND(const OpKernelInfo& info) : CudaKernel(info) {} Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { Status ScatterNDImpl( hipStream_t stream, void* output_data, const size_t element_size, const size_t num_indices, const int64_t* indices_data, const int64_t last_index_dimension, const int64_t* element_counts_and_input_dims, const void* updates_data, const size_t num_updates_elements); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { Status ScatterNDImpl( cudaStream_t stream, void* output_data, const size_t element_size, const size_t num_indices, const int64_t* indices_data, const int64_t last_index_dimension, const int64_t* element_counts_and_input_dims, const void* updates_data, const size_t num_updates_elements); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "sequence_op.h" namespace onnxruntime { namespace rocm { ONNX_OPERATOR_KERNEL_EX( SequenceAt, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 1) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("I", std::vector<MLDataType>{ DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), SequenceAt); ONNX_OPERATOR_KERNEL_EX( SequenceConstruct, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), SequenceConstruct); ONNX_OPERATOR_KERNEL_EX( SequenceEmpty, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), SequenceEmpty); ONNX_OPERATOR_KERNEL_EX( SequenceLength, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("I", DataTypeImpl::GetTensorType<int64_t>()), SequenceLength); ONNX_OPERATOR_KERNEL_EX( ConcatFromSequence, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), ConcatFromSequence); ONNX_OPERATOR_KERNEL_EX( SequenceErase, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 1) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("I", std::vector<MLDataType>{ DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), SequenceErase); ONNX_OPERATOR_KERNEL_EX( SequenceInsert, kOnnxDomain, 11, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 2) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("I", std::vector<MLDataType>{ DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), SequenceInsert); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "sequence_op.h" namespace onnxruntime { namespace cuda { ONNX_OPERATOR_KERNEL_EX( SequenceAt, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 1) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("I", std::vector<MLDataType>{ DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), SequenceAt); ONNX_OPERATOR_KERNEL_EX( SequenceConstruct, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), SequenceConstruct); ONNX_OPERATOR_KERNEL_EX( SequenceEmpty, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), SequenceEmpty); ONNX_OPERATOR_KERNEL_EX( SequenceLength, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("I", DataTypeImpl::GetTensorType<int64_t>()), SequenceLength); ONNX_OPERATOR_KERNEL_EX( ConcatFromSequence, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), ConcatFromSequence); ONNX_OPERATOR_KERNEL_EX( SequenceErase, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 1) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("I", std::vector<MLDataType>{ DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), SequenceErase); ONNX_OPERATOR_KERNEL_EX( SequenceInsert, kOnnxDomain, 11, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 2) .TypeConstraint("S", DataTypeImpl::AllFixedSizeSequenceTensorTypes()) .TypeConstraint("I", std::vector<MLDataType>{ DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), SequenceInsert); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cpu/tensor/shape_op.h" #include "core/providers/rocm/rocm_fwd.h" namespace onnxruntime { namespace rocm { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Shape, kOnnxDomain, 1, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); ONNX_OPERATOR_VERSIONED_KERNEL_EX( Shape, kOnnxDomain, 13, 14, kRocmExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); ONNX_OPERATOR_VERSIONED_KERNEL_EX( Shape, kOnnxDomain, 15, 18, kRocmExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); ONNX_OPERATOR_KERNEL_EX( Shape, kOnnxDomain, 19, kRocmExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypesIRv9()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cpu/tensor/shape_op.h" #include "core/providers/cuda/cuda_fwd.h" namespace onnxruntime { namespace cuda { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Shape, kOnnxDomain, 1, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); ONNX_OPERATOR_VERSIONED_KERNEL_EX( Shape, kOnnxDomain, 13, 14, kCudaExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); ONNX_OPERATOR_VERSIONED_KERNEL_EX( Shape, kOnnxDomain, 15, 18, kCudaExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); ONNX_OPERATOR_KERNEL_EX( Shape, kOnnxDomain, 19, kCudaExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypesIRv9()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Shape); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cpu/tensor/size.h" #include "core/providers/rocm/rocm_fwd.h" namespace onnxruntime { namespace rocm { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Size, kOnnxDomain, 1, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Size); ONNX_OPERATOR_KERNEL_EX( Size, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Size); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cpu/tensor/size.h" #include "core/providers/cuda/cuda_fwd.h" namespace onnxruntime { namespace cuda { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Size, kOnnxDomain, 1, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Size); ONNX_OPERATOR_KERNEL_EX( Size, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create()) // properly force CPU/GPU synch inside the kernel .OutputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T", DataTypeImpl::AllTensorTypes()) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()), Size); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/slice.h" #include "core/providers/cpu/tensor/utils.h" namespace onnxruntime { namespace rocm { namespace SliceRocm { Status Impl(hipStream_t stream, const void* input_data, const TensorShape& input_shape, void* output_data, SliceOp::PrepareForComputeMetadata& prepare_metadata, size_t element_size); } // namespace SliceRocm template <bool dynamic> class Slice : public RocmKernel, public SliceBase { public: Slice(const OpKernelInfo& info) : RocmKernel(info), SliceBase(info, dynamic) {} Status ComputeInternal(OpKernelContext* ctx) const override; private: virtual const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const; virtual Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts, TensorShapeVector& input_ends, TensorShapeVector& input_axes, TensorShapeVector& input_steps) const; virtual Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray<int64_t>& starts_buffer, const TArray<int64_t>& steps_buffer, const TArray<int64_t>& input_strides, const TArray<fast_divmod>& output_strides, OpKernelContext* ctx, const TensorShape& output_shape) const; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/slice.h" #include "core/providers/cpu/tensor/utils.h" namespace onnxruntime { namespace cuda { namespace SliceCuda { Status Impl(cudaStream_t stream, const void* input_data, const TensorShape& input_shape, void* output_data, SliceOp::PrepareForComputeMetadata& prepare_metadata, size_t element_size); } // namespace SliceCuda template <bool dynamic> class Slice : public CudaKernel, public SliceBase { public: Slice(const OpKernelInfo& info) : CudaKernel(info), SliceBase(info, dynamic) {} Status ComputeInternal(OpKernelContext* ctx) const override; private: virtual const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const; virtual Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts, TensorShapeVector& input_ends, TensorShapeVector& input_axes, TensorShapeVector& input_steps) const; virtual Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray<int64_t>& starts_buffer, const TArray<int64_t>& steps_buffer, const TArray<int64_t>& input_strides, const TArray<fast_divmod>& output_strides, OpKernelContext* ctx, const TensorShape& output_shape) const; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { Status SliceImpl(hipStream_t stream, const size_t element_size, const int32_t dimension_count, const TArray<int64_t>& starts, const TArray<int64_t>& steps, const TArray<int64_t>& input_strides, const TArray<fast_divmod>& output_strides, const void* input_data, void* output_data, const size_t N); #ifdef ENABLE_TRAINING_OPS Status SliceImplGrad(hipStream_t stream, const size_t element_size, const int32_t dimension_count, const TArray<int64_t>& starts, const TArray<int64_t>& steps, const TArray<int64_t>& input_strides, const TArray<fast_divmod>& output_strides, const void* input_data, void* output_data, const size_t N); #endif // ENABLE_TRAINING_OPS } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { Status SliceImpl(cudaStream_t stream, const size_t element_size, const int32_t dimension_count, const TArray<int64_t>& starts, const TArray<int64_t>& steps, const TArray<int64_t>& input_strides, const TArray<fast_divmod>& output_strides, const void* input_data, void* output_data, const size_t N); #ifdef ENABLE_TRAINING_OPS Status SliceImplGrad(cudaStream_t stream, const size_t element_size, const int32_t dimension_count, const TArray<int64_t>& starts, const TArray<int64_t>& steps, const TArray<int64_t>& input_strides, const TArray<fast_divmod>& output_strides, const void* input_data, void* output_data, const size_t N); #endif // ENABLE_TRAINING_OPS } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "constant_of_shape.h" using namespace ::onnxruntime::common; using namespace ONNX_NAMESPACE; namespace onnxruntime { namespace rocm { ONNX_OPERATOR_KERNEL_EX( ConstantOfShape, kOnnxDomain, 9, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()) .TypeConstraint("T2", DataTypeImpl::AllFixedSizeTensorTypes()), ConstantOfShape); Status ConstantOfShape::ComputeInternal(OpKernelContext* ctx) const { Tensor* output_tensor = nullptr; ORT_RETURN_IF_ERROR(PrepareCompute(ctx, &output_tensor)); auto output_data = output_tensor->MutableDataRaw(); const auto size = output_tensor->Shape().Size(); const void* value_ptr = GetValuePtr(); const auto element_size = output_tensor->DataType()->Size(); #define CASE(TYPE) \ case sizeof(TYPE): \ if (size > 0) { \ rocm::Fill(Stream(ctx), reinterpret_cast<TYPE*>(output_data), *(reinterpret_cast<const TYPE*>(value_ptr)), size); \ } \ break; switch (element_size) { CASE(int8_t) CASE(int16_t) CASE(int32_t) CASE(int64_t) default: ORT_THROW("Unsupported value attribute datatype with sizeof=: ", element_size); break; } return Status::OK(); } } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "constant_of_shape.h" using namespace ::onnxruntime::common; using namespace ONNX_NAMESPACE; namespace onnxruntime { namespace cuda { ONNX_OPERATOR_KERNEL_EX( ConstantOfShape, kOnnxDomain, 9, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 0) .TypeConstraint("T1", DataTypeImpl::GetTensorType<int64_t>()) .TypeConstraint("T2", DataTypeImpl::AllFixedSizeTensorTypes()), ConstantOfShape); Status ConstantOfShape::ComputeInternal(OpKernelContext* ctx) const { Tensor* output_tensor = nullptr; ORT_RETURN_IF_ERROR(PrepareCompute(ctx, &output_tensor)); auto output_data = output_tensor->MutableDataRaw(); const auto size = output_tensor->Shape().Size(); const void* value_ptr = GetValuePtr(); const auto element_size = output_tensor->DataType()->Size(); #define CASE(TYPE) \ case sizeof(TYPE): \ if (size > 0) { \ cuda::Fill(Stream(ctx), reinterpret_cast<TYPE*>(output_data), *(reinterpret_cast<const TYPE*>(value_ptr)), size); \ } \ break; switch (element_size) { CASE(int8_t) CASE(int16_t) CASE(int32_t) CASE(int64_t) default: ORT_THROW("Unsupported value attribute datatype with sizeof=: ", element_size); break; } return Status::OK(); } } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/space_depth_ops.h" namespace onnxruntime { namespace rocm { class SpaceToDepth final : public RocmKernel, SpaceDepthBase { public: explicit SpaceToDepth(const OpKernelInfo& info) : RocmKernel(info), SpaceDepthBase(info) { } Status ComputeInternal(OpKernelContext* context) const override; }; class DepthToSpace final : public RocmKernel, SpaceDepthBase { public: explicit DepthToSpace(const OpKernelInfo& info) : RocmKernel(info), SpaceDepthBase(info) { std::string mode; // if mode doesn't exist, then it is the default "DCR" mode // (or) it is an opset < 11 model for which the only mode is "DCR" mode if (info.GetAttr("mode", &mode).IsOK()) { if (mode == "CRD") is_dcr_ = false; else if (mode != "DCR") ORT_THROW("DepthToSpace op: only 'DCR' and 'CRD' modes are supported"); } } Status ComputeInternal(OpKernelContext* context) const override; private: bool is_dcr_ = true; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/space_depth_ops.h" namespace onnxruntime { namespace cuda { class SpaceToDepth final : public CudaKernel, SpaceDepthBase { public: explicit SpaceToDepth(const OpKernelInfo& info) : CudaKernel(info), SpaceDepthBase(info) { } Status ComputeInternal(OpKernelContext* context) const override; }; class DepthToSpace final : public CudaKernel, SpaceDepthBase { public: explicit DepthToSpace(const OpKernelInfo& info) : CudaKernel(info), SpaceDepthBase(info) { std::string mode; // if mode doesn't exist, then it is the default "DCR" mode // (or) it is an opset < 11 model for which the only mode is "DCR" mode if (info.GetAttr("mode", &mode).IsOK()) { if (mode == "CRD") is_dcr_ = false; else if (mode != "DCR") ORT_THROW("DepthToSpace op: only 'DCR' and 'CRD' modes are supported"); } } Status ComputeInternal(OpKernelContext* context) const override; private: bool is_dcr_ = true; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/split.h" namespace onnxruntime { namespace rocm { class SplitKernel : public RocmKernel, public SplitBase { public: SplitKernel(const OpKernelInfo& info, uint32_t opset) : RocmKernel(info), SplitBase(info, opset) {} Status ComputeInternal(OpKernelContext* context) const override; }; // versions 2, 11 and 13 class Split_2_13 final : public SplitKernel { public: // use opset 1 for all versions earlier than 18 Split_2_13(const OpKernelInfo& info) : SplitKernel(info, /* opset */ 1) {} }; class Split_18 final : public SplitKernel { public: Split_18(const OpKernelInfo& info) : SplitKernel(info, 18) {} }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/split.h" namespace onnxruntime { namespace cuda { class SplitKernel : public CudaKernel, public SplitBase { public: SplitKernel(const OpKernelInfo& info, uint32_t opset) : CudaKernel(info), SplitBase(info, opset) {} Status ComputeInternal(OpKernelContext* context) const override; }; // versions 2, 11 and 13 class Split_2_13 final : public SplitKernel { public: // use opset 1 for all versions earlier than 18 Split_2_13(const OpKernelInfo& info) : SplitKernel(info, /* opset */ 1) {} }; class Split_18 final : public SplitKernel { public: Split_18(const OpKernelInfo& info) : SplitKernel(info, 18) {} }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/rocm/shared_inc/rocm_utils.h" #include "core/common/common.h" namespace onnxruntime { namespace rocm { template <typename OutputDataArray> Status SplitSameSplitDimImpl(hipStream_t stream, const size_t element_size, const int block_size_including_axis_dim, const int block_size_inside_axis_dim, const int64_t split_size, const int num_outputs, const void* input_data, OutputDataArray output_data, const size_t input_size); Status SplitImpl(hipStream_t stream, const size_t element_size, const int block_size_including_axis_dim, const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data, void** output_data, const size_t input_size); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/common/common.h" namespace onnxruntime { namespace cuda { template <typename OutputDataArray> Status SplitSameSplitDimImpl(cudaStream_t stream, const size_t element_size, const int block_size_including_axis_dim, const int block_size_inside_axis_dim, const int64_t split_size, const int num_outputs, const void* input_data, OutputDataArray output_data, const size_t input_size); Status SplitImpl(cudaStream_t stream, const size_t element_size, const int block_size_including_axis_dim, const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range, const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data, void** output_data, const size_t input_size); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "squeeze.h" namespace onnxruntime { namespace rocm { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Squeeze, kOnnxDomain, 1, 10, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Squeeze); // explicit support for negative axis. ONNX_OPERATOR_VERSIONED_KERNEL_EX( Squeeze, kOnnxDomain, 11, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Squeeze); // axes is input instead of attribute ONNX_OPERATOR_KERNEL_EX( Squeeze, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .InputMemoryType(OrtMemTypeCPUInput, 1), Squeeze); Status Squeeze::ComputeInternal(OpKernelContext* ctx) const { const Tensor* X = ctx->Input<Tensor>(0); const TensorShape& X_shape = X->Shape(); TensorShapeVector axes; size_t num_inputs = ctx->InputCount(); if (num_inputs == 2) { // axes is an input const Tensor* axes_tensor = ctx->Input<Tensor>(1); ORT_ENFORCE(axes_tensor != nullptr, "Axes input is null"); ORT_ENFORCE(axes_tensor->Shape().NumDimensions() == 1, "An axes tensor must be a vector tensor."); auto nDims = static_cast<size_t>(axes_tensor->Shape()[0]); const auto* data = axes_tensor->Data<int64_t>(); axes.assign(data, data + nDims); } else { axes.assign(axes_.begin(), axes_.end()); } TensorShapeVector output_shape = ComputeOutputShape(X_shape, axes); Tensor* Y = ctx->Output(0, TensorShape(output_shape)); const void* input = X->DataRaw(); void* output = Y->MutableDataRaw(); if (input == output) return Status::OK(); auto count = X->Shape().Size(); auto element_bytes = X->DataType()->Size(); HIP_RETURN_IF_ERROR(hipMemcpyAsync(output, input, count * element_bytes, hipMemcpyDeviceToDevice, Stream(ctx))); return Status::OK(); } } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "squeeze.h" namespace onnxruntime { namespace cuda { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Squeeze, kOnnxDomain, 1, 10, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Squeeze); // explicit support for negative axis. ONNX_OPERATOR_VERSIONED_KERNEL_EX( Squeeze, kOnnxDomain, 11, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Squeeze); // axes is input instead of attribute ONNX_OPERATOR_KERNEL_EX( Squeeze, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .InputMemoryType(OrtMemTypeCPUInput, 1), Squeeze); Status Squeeze::ComputeInternal(OpKernelContext* ctx) const { const Tensor* X = ctx->Input<Tensor>(0); const TensorShape& X_shape = X->Shape(); TensorShapeVector axes; size_t num_inputs = ctx->InputCount(); if (num_inputs == 2) { // axes is an input const Tensor* axes_tensor = ctx->Input<Tensor>(1); ORT_ENFORCE(axes_tensor != nullptr, "Axes input is null"); ORT_ENFORCE(axes_tensor->Shape().NumDimensions() == 1, "An axes tensor must be a vector tensor."); auto nDims = static_cast<size_t>(axes_tensor->Shape()[0]); const auto* data = axes_tensor->Data<int64_t>(); axes.assign(data, data + nDims); } else { axes.assign(axes_.begin(), axes_.end()); } TensorShapeVector output_shape = ComputeOutputShape(X_shape, axes); Tensor* Y = ctx->Output(0, TensorShape(output_shape)); const void* input = X->DataRaw(); void* output = Y->MutableDataRaw(); if (input == output) return Status::OK(); auto count = X->Shape().Size(); auto element_bytes = X->DataType()->Size(); CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(output, input, count * element_bytes, cudaMemcpyDeviceToDevice, Stream(ctx))); return Status::OK(); } } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/squeeze.h" namespace onnxruntime { namespace rocm { class Squeeze final : public SqueezeBase, public RocmKernel { public: Squeeze(const OpKernelInfo& info) : SqueezeBase(info), RocmKernel(info) {} Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/squeeze.h" namespace onnxruntime { namespace cuda { class Squeeze final : public SqueezeBase, public CudaKernel { public: Squeeze(const OpKernelInfo& info) : SqueezeBase(info), CudaKernel(info) {} Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/tile.h" namespace onnxruntime { namespace rocm { struct Tile final : RocmKernel { explicit Tile(const OpKernelInfo& info) : RocmKernel(info) { } Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/shared_library/provider_api.h" #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/tile.h" namespace onnxruntime { namespace cuda { struct Tile final : CudaKernel { explicit Tile(const OpKernelInfo& info) : CudaKernel(info) { } Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { template <typename T> void TileImpl(hipStream_t stream, const size_t shape_rank, const TArray<fast_divmod>& fdm_input_shape, const TArray<int64_t>& input_stride, const T* input_data, const TArray<fast_divmod>& fdm_output_strides, T* output_data, const size_t N); template <typename T> void TileMemcpyImpl(hipStream_t stream, const T* input_data, T* output_data, const size_t num_input_elements, const size_t repeats); template <typename T> void TileBatchedMemcpyImpl(hipStream_t stream, const T* input_data, T* output_data, const size_t size_input_row, const size_t num_input_elements, const size_t batch_repeats, const size_t repeats_per_batch); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { template <typename T> void TileImpl(cudaStream_t stream, const size_t shape_rank, const TArray<fast_divmod>& fdm_input_shape, const TArray<int64_t>& input_stride, const T* input_data, const TArray<fast_divmod>& fdm_output_strides, T* output_data, const size_t N); template <typename T> void TileMemcpyImpl(cudaStream_t stream, const T* input_data, T* output_data, const size_t num_input_elements, const size_t repeats); template <typename T> void TileBatchedMemcpyImpl(cudaStream_t stream, const T* input_data, T* output_data, const size_t size_input_row, const size_t num_input_elements, const size_t batch_repeats, const size_t repeats_per_batch); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/shared_library/provider_api.h" #include "core/common/gsl.h" #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/tensor/transpose.h" namespace onnxruntime { namespace rocm { class Transpose final : public RocmKernel, public TransposeBase { public: Transpose(const OpKernelInfo& info) : RocmKernel(info), TransposeBase(info) { } Status ComputeInternal(OpKernelContext* context) const override; static Status DoTranspose(const Transpose& transpose_kernel, onnxruntime::Stream* ort_stream, const gsl::span<const size_t>& permutations, const Tensor& input, Tensor& output); // `input_shape_override` (if provided) overrides the shape of `input` for compute purposes // `output_shape_override` (if provided) overrides the shape of `output` for compute purposes static Status DoTranspose(const hipDeviceProp_t& prop, hipStream_t stream, const rocblas_handle rocblas_handle, const gsl::span<const size_t>& permutations, const Tensor& input, Tensor& output, const TensorShape* input_shape_override = nullptr, const TensorShape* output_shape_override = nullptr); }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/shared_library/provider_api.h" #include "core/common/gsl.h" #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/tensor/transpose.h" namespace onnxruntime { namespace cuda { class Transpose final : public CudaKernel, public TransposeBase { public: Transpose(const OpKernelInfo& info) : CudaKernel(info), TransposeBase(info) { } Status ComputeInternal(OpKernelContext* context) const override; static Status DoTranspose(const Transpose& transpose_kernel, onnxruntime::Stream* ort_stream, const gsl::span<const size_t>& permutations, const Tensor& input, Tensor& output); // `input_shape_override` (if provided) overrides the shape of `input` for compute purposes // `output_shape_override` (if provided) overrides the shape of `output` for compute purposes static Status DoTranspose(const cudaDeviceProp& prop, cudaStream_t stream, const cublasHandle_t cublas_handle, const gsl::span<const size_t>& permutations, const Tensor& input, Tensor& output, const TensorShape* input_shape_override = nullptr, const TensorShape* output_shape_override = nullptr); }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { bool CanDoTranspose3D(const hipDeviceProp_t& prop, size_t rank, const gsl::span<const int64_t>& input_dims, const gsl::span<const size_t>& permutations, dim3& grid_size, dim3& block_size); Status Transpose3DImpl(hipStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, void* output_data, int64_t N, const dim3& grid_size, const dim3& block_size); bool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const hipDeviceProp_t& prop, size_t element_size, int32_t rank, const gsl::span<const int64_t>& input_dims, const gsl::span<const size_t>& permutations, dim3& grid_size, dim3& block_size); Status Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim(hipStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, const TArray<int64_t>& output_strides, void* output_data, int N, const dim3& grid_size, const dim3& block_size); bool CanDoTranspose4DParallelizeOneElementPerThread(const hipDeviceProp_t& prop, size_t element_size, int32_t rank, const gsl::span<const int64_t>& input_dims, const gsl::span<const size_t>& permutations, dim3& grid_size, dim3& block_size); Status Transpose4DParallelizeOneElementPerThread(hipStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, const TArray<int64_t>& output_strides, void* output_data, int N, const dim3& grid_size, const dim3& block_size); Status TransposeImpl(hipStream_t stream, size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides, const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int N); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { bool CanDoTranspose3D(const cudaDeviceProp& prop, size_t rank, const gsl::span<const int64_t>& input_dims, const gsl::span<const size_t>& permutations, dim3& grid_size, dim3& block_size); Status Transpose3DImpl(cudaStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, void* output_data, int64_t N, const dim3& grid_size, const dim3& block_size); bool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const cudaDeviceProp& prop, size_t element_size, int32_t rank, const gsl::span<const int64_t>& input_dims, const gsl::span<const size_t>& permutations, dim3& grid_size, dim3& block_size); Status Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim(cudaStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, const TArray<int64_t>& output_strides, void* output_data, int N, const dim3& grid_size, const dim3& block_size); bool CanDoTranspose4DParallelizeOneElementPerThread(const cudaDeviceProp& prop, size_t element_size, int32_t rank, const gsl::span<const int64_t>& input_dims, const gsl::span<const size_t>& permutations, dim3& grid_size, dim3& block_size); Status Transpose4DParallelizeOneElementPerThread(cudaStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, const TArray<int64_t>& output_strides, void* output_data, int N, const dim3& grid_size, const dim3& block_size); Status TransposeImpl(cudaStream_t stream, size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides, const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int N); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/rocm/tensor/trilu.h" #include "core/providers/rocm/tensor/trilu_impl.h" #include "core/providers/cpu/tensor/utils.h" using namespace onnxruntime::common; namespace onnxruntime { namespace rocm { ONNX_OPERATOR_KERNEL_EX( Trilu, kOnnxDomain, 14, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 1) .MayInplace(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Trilu); Status Trilu::ComputeInternal(OpKernelContext* ctx) const { const Tensor* input_ptr = ctx->Input<Tensor>(0); const auto* k = ctx->Input<Tensor>(1); int64_t k_val = 0; if (k) { ORT_ENFORCE(IsScalarOr1ElementVector(k), "k should be a 1-D or 0-D tensor."); k_val = *(k->Data<int64_t>()); } if (input_ptr == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); const Tensor& input = *input_ptr; const auto& shape = input.Shape(); const auto& input_dims = shape.GetDims(); auto rank = input_dims.size(); if (rank < 2) { return Status(ONNXRUNTIME, INVALID_ARGUMENT, "Input tensor should have a rank of at least 2"); } Tensor* output = ctx->Output(0, shape); auto matrix_size = input_dims[rank - 1] * input_dims[rank - 2]; if (matrix_size == 0) { return Status::OK(); } const fast_divmod row_col_divmod_indices(gsl::narrow_cast<int>(input_dims[rank - 1])); const fast_divmod batch_divmod_indices(gsl::narrow_cast<int>(matrix_size)); size_t element_size = input.DataType()->Size(); return TriluImpl( this->Stream(ctx), upper_, element_size, k_val, input.DataRaw(), output->MutableDataRaw(), gsl::narrow<int>(shape.Size()), batch_divmod_indices, row_col_divmod_indices); } } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/tensor/trilu.h" #include "core/providers/cuda/tensor/trilu_impl.h" #include "core/providers/cpu/tensor/utils.h" using namespace onnxruntime::common; namespace onnxruntime { namespace cuda { ONNX_OPERATOR_KERNEL_EX( Trilu, kOnnxDomain, 14, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .InputMemoryType(OrtMemTypeCPUInput, 1) .MayInplace(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Trilu); Status Trilu::ComputeInternal(OpKernelContext* ctx) const { const Tensor* input_ptr = ctx->Input<Tensor>(0); const auto* k = ctx->Input<Tensor>(1); int64_t k_val = 0; if (k) { ORT_ENFORCE(IsScalarOr1ElementVector(k), "k should be a 1-D or 0-D tensor."); k_val = *(k->Data<int64_t>()); } if (input_ptr == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch"); const Tensor& input = *input_ptr; const auto& shape = input.Shape(); const auto& input_dims = shape.GetDims(); auto rank = input_dims.size(); if (rank < 2) { return Status(ONNXRUNTIME, INVALID_ARGUMENT, "Input tensor should have a rank of at least 2"); } Tensor* output = ctx->Output(0, shape); auto matrix_size = input_dims[rank - 1] * input_dims[rank - 2]; if (matrix_size == 0) { return Status::OK(); } const fast_divmod row_col_divmod_indices(gsl::narrow_cast<int>(input_dims[rank - 1])); const fast_divmod batch_divmod_indices(gsl::narrow_cast<int>(matrix_size)); size_t element_size = input.DataType()->Size(); return TriluImpl( this->Stream(ctx), upper_, element_size, k_val, input.DataRaw(), output->MutableDataRaw(), gsl::narrow<int>(shape.Size()), batch_divmod_indices, row_col_divmod_indices); } } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/rocm_kernel.h" #include "core/providers/cpu/generator/constant_of_shape_base.h" #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { class ConstantOfShape final : public ConstantOfShapeBase<>, public RocmKernel { public: explicit ConstantOfShape(const OpKernelInfo& info) : ConstantOfShapeBase(info), RocmKernel(info) {} ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ConstantOfShape); Status ComputeInternal(OpKernelContext* ctx) const override; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/cuda_kernel.h" #include "core/providers/cpu/generator/constant_of_shape_base.h" #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { class ConstantOfShape final : public ConstantOfShapeBase<>, public CudaKernel { public: explicit ConstantOfShape(const OpKernelInfo& info) : ConstantOfShapeBase(info), CudaKernel(info) {} ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ConstantOfShape); Status ComputeInternal(OpKernelContext* ctx) const override; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace rocm { class Trilu final : public RocmKernel { public: Trilu(const OpKernelInfo& info) : RocmKernel(info), upper_(info.GetAttrOrDefault<int64_t>("upper", 1) >= 1) { } ~Trilu() = default; Status ComputeInternal(OpKernelContext* context) const override; private: bool upper_; }; } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace cuda { class Trilu final : public CudaKernel { public: Trilu(const OpKernelInfo& info) : CudaKernel(info), upper_(info.GetAttrOrDefault<int64_t>("upper", 1) >= 1) { } ~Trilu() = default; Status ComputeInternal(OpKernelContext* context) const override; private: bool upper_; }; } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { Status TriluImpl( hipStream_t stream, bool upper, size_t element_size, int64_t k, const void* input_data, void* output_data, int N, const fast_divmod& batch_divmod_indices, const fast_divmod& row_col_divmod_indices); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <stdint.h> #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { Status TriluImpl( cudaStream_t stream, bool upper, size_t element_size, int64_t k, const void* input_data, void* output_data, int N, const fast_divmod& batch_divmod_indices, const fast_divmod& row_col_divmod_indices); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/rocm/tensor/unsqueeze.h" namespace onnxruntime { namespace rocm { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Unsqueeze, kOnnxDomain, 1, 10, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Unsqueeze); // explicitly support negative axis ONNX_OPERATOR_VERSIONED_KERNEL_EX( Unsqueeze, kOnnxDomain, 11, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Unsqueeze); // axes is input instead of attribute, support bfloat16 ONNX_OPERATOR_KERNEL_EX( Unsqueeze, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .InputMemoryType(OrtMemTypeCPUInput, 1), Unsqueeze); Status Unsqueeze::ComputeInternal(OpKernelContext* ctx) const { Prepare p; ORT_RETURN_IF_ERROR(PrepareCompute(ctx, p)); const void* input = p.input_tensor->DataRaw(); void* output = p.output_tensor->MutableDataRaw(); if (input == output) return Status::OK(); auto count = p.input_tensor->Shape().Size(); auto element_bytes = p.input_tensor->DataType()->Size(); HIP_RETURN_IF_ERROR(hipMemcpyAsync(output, input, count * element_bytes, hipMemcpyDeviceToDevice, Stream(ctx))); return Status::OK(); } } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/tensor/unsqueeze.h" namespace onnxruntime { namespace cuda { ONNX_OPERATOR_VERSIONED_KERNEL_EX( Unsqueeze, kOnnxDomain, 1, 10, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Unsqueeze); // explicitly support negative axis ONNX_OPERATOR_VERSIONED_KERNEL_EX( Unsqueeze, kOnnxDomain, 11, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()), Unsqueeze); // axes is input instead of attribute, support bfloat16 ONNX_OPERATOR_KERNEL_EX( Unsqueeze, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()) .InputMemoryType(OrtMemTypeCPUInput, 1), Unsqueeze); Status Unsqueeze::ComputeInternal(OpKernelContext* ctx) const { Prepare p; ORT_RETURN_IF_ERROR(PrepareCompute(ctx, p)); const void* input = p.input_tensor->DataRaw(); void* output = p.output_tensor->MutableDataRaw(); if (input == output) return Status::OK(); auto count = p.input_tensor->Shape().Size(); auto element_bytes = p.input_tensor->DataType()->Size(); CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(output, input, count * element_bytes, cudaMemcpyDeviceToDevice, Stream(ctx))); return Status::OK(); } } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/unsqueeze.h"

namespace onnxruntime {
namespace rocm {

class Unsqueeze final : public UnsqueezeBase, public RocmKernel {
 public:
  Unsqueeze(const OpKernelInfo& info) : UnsqueezeBase(info), RocmKernel(info) {}
  Status ComputeInternal(OpKernelContext* context) const override;
};

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/unsqueeze.h"

namespace onnxruntime {
namespace cuda {

class Unsqueeze final : public UnsqueezeBase, public CudaKernel {
 public:
  Unsqueeze(const OpKernelInfo& info) : UnsqueezeBase(info), CudaKernel(info) {}
  Status ComputeInternal(OpKernelContext* context) const override;
};

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/upsamplebase.h"

namespace onnxruntime {
namespace rocm {

template <typename T>
class Upsample : public UpsampleBase, public RocmKernel {
 public:
  Upsample(const OpKernelInfo& info) : UpsampleBase(info), RocmKernel(info) {
  }

  Status ComputeInternal(OpKernelContext* context) const override;
  Status BaseCompute(OpKernelContext* context, const std::vector<float>& roi, const std::vector<float>& scales,
                     const gsl::span<const int64_t>& output_dims) const;
};

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/upsamplebase.h"

namespace onnxruntime {
namespace cuda {

template <typename T>
class Upsample : public UpsampleBase, public CudaKernel {
 public:
  Upsample(const OpKernelInfo& info) : UpsampleBase(info), CudaKernel(info) {
  }

  Status ComputeInternal(OpKernelContext* context) const override;
  Status BaseCompute(OpKernelContext* context, const std::vector<float>& roi, const std::vector<float>& scales,
                     const gsl::span<const int64_t>& output_dims) const;
};

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/common/common.h"
#include "core/providers/cpu/tensor/upsamplebase.h"

namespace onnxruntime {
namespace rocm {

template <typename T>
void UpampleImpl(hipStream_t stream,
                 const onnxruntime::UpsampleMode upsample_mode,
                 const size_t rank,
                 const int64_t input_dim2,
                 const TArray<int64_t>& input_pitches,
                 const TArray<fast_divmod>& output_div_pitches,
                 const TArray<fast_divmod>& scales_div,
                 const T* input_data,
                 T* output_data,
                 const size_t N);

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/common/common.h"
#include "core/providers/cpu/tensor/upsamplebase.h"

namespace onnxruntime {
namespace cuda {

template <typename T>
void UpampleImpl(cudaStream_t stream,
                 const onnxruntime::UpsampleMode upsample_mode,
                 const size_t rank,
                 const int64_t input_dim2,
                 const TArray<int64_t>& input_pitches,
                 const TArray<fast_divmod>& output_div_pitches,
                 const TArray<fast_divmod>& scales_div,
                 const T* input_data,
                 T* output_data,
                 const size_t N);

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace rocm {

template <typename T>
class Where final : public RocmKernel {
 public:
  Where(const OpKernelInfo& info) : RocmKernel(info) {}

  Status ComputeInternal(OpKernelContext* context) const override;
};

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace cuda {

template <typename T>
class Where final : public CudaKernel {
 public:
  Where(const OpKernelInfo& info) : CudaKernel(info) {}

  Status ComputeInternal(OpKernelContext* context) const override;
};

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/common/common.h"

namespace onnxruntime {
namespace rocm {

template <typename T>
void WhereImpl(
    hipStream_t stream,
    size_t output_rank_or_simple_broadcast,
    BroadcastIndexType cond_index_type,
    const TArray<int64_t>& cond_padded_strides,
    const bool* cond_data,
    BroadcastIndexType x_index_type,
    const TArray<int64_t>& x_padded_strides,
    const T* x_data,
    BroadcastIndexType y_index_type,
    const TArray<int64_t>& y_padded_strides,
    const T* y_data,
    const TArray<fast_divmod>& fdm_output_strides,
    T* output_data,
    size_t count);

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/common/common.h"

namespace onnxruntime {
namespace cuda {

template <typename T>
void WhereImpl(
    cudaStream_t stream,
    size_t output_rank_or_simple_broadcast,
    BroadcastIndexType cond_index_type,
    const TArray<int64_t>& cond_padded_strides,
    const bool* cond_data,
    BroadcastIndexType x_index_type,
    const TArray<int64_t>& x_padded_strides,
    const T* x_data,
    BroadcastIndexType y_index_type,
    const TArray<int64_t>& y_padded_strides,
    const T* y_data,
    const TArray<fast_divmod>& fdm_output_strides,
    T* output_data,
    size_t count);

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef NDEBUG

namespace onnxruntime {
namespace rocm {
namespace test {

// Test header provides function declarations in EP-side bridge.
bool TestDeferredRelease();
bool TestDeferredReleaseWithoutArena();
bool TestBeamSearchTopK();
bool TestGreedySearchTopOne();

} // namespace test
} // namespace rocm
} // namespace onnxruntime

#endif
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef NDEBUG

namespace onnxruntime {
namespace cuda {
namespace test {

// Test header provides function declarations in EP-side bridge.
bool TestDeferredRelease();
bool TestDeferredReleaseWithoutArena();
bool TestBeamSearchTopK();
bool TestGreedySearchTopOne();

} // namespace test
} // namespace cuda
} // namespace onnxruntime

#endif
###
#ifndef NDEBUG

#include <iostream>

#include "core/providers/rocm/test/all_tests.h"
#include "core/providers/rocm/rocm_execution_provider.h"
#include "core/providers/rocm/rocm_allocator.h"
#include "core/providers/rocm/rocm_stream_handle.h"

namespace onnxruntime {
namespace rocm {
namespace test {

bool TestDeferredRelease() {
  ROCMExecutionProviderInfo info;
  ROCMExecutionProvider ep(info);
  onnxruntime::AllocatorManager allocator_manager;
  ep.RegisterAllocator(allocator_manager);
  AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);
  AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);
  RocmStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);

  const size_t n_bytes = 10 * 1000000;
  const int64_t n_allocs = 64;
  ORT_THROW_IF_ERROR(ep.OnRunStart());
  for (size_t i = 0; i < n_allocs; ++i) {
    auto pinned_buffer = ep.AllocateBufferOnCPUPinned<void>(n_bytes);
    stream.EnqueDeferredCPUBuffer(pinned_buffer.release());
  }

  AllocatorStats stats;
  cpu_pinned_alloc->GetStats(&stats);
  ORT_ENFORCE(stats.num_allocs == n_allocs);
  ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());
  ORT_THROW_IF_ERROR(ep.OnRunEnd(true));
  return true;
}

bool TestDeferredReleaseWithoutArena() {
  ROCMExecutionProviderInfo info;
  ROCMExecutionProvider ep(info);
  onnxruntime::AllocatorManager allocator_manager;

  OrtDevice pinned_device{OrtDevice::CPU, OrtDevice::MemType::CUDA_PINNED, DEFAULT_CPU_ALLOCATOR_DEVICE_ID};
  AllocatorCreationInfo pinned_memory_info(
      [](OrtDevice::DeviceId device_id) { return std::make_unique<ROCMPinnedAllocator>(device_id, CUDA_PINNED); },
      pinned_device.Id(),
      false);

  auto rocm_pinned_alloc = CreateAllocator(pinned_memory_info);
  allocator_manager.InsertAllocator(rocm_pinned_alloc);
  ep.RegisterAllocator(allocator_manager);

  AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);
  AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);
  RocmStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);

  const size_t n_bytes = 10 * 1000000;
  const int64_t n_allocs = 64;
  ORT_THROW_IF_ERROR(ep.OnRunStart());
  for (size_t i = 0; i < n_allocs; ++i) {
    auto pinned_buffer = ep.AllocateBufferOnCPUPinned<void>(n_bytes);
    stream.EnqueDeferredCPUBuffer(pinned_buffer.release());
  }

  ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());
  ORT_THROW_IF_ERROR(ep.OnRunEnd(true));
  return true;
}

}
}
}

#endif
###
#ifndef NDEBUG

#include <iostream>

#include "core/providers/cuda/test/all_tests.h"
#include "core/providers/cuda/cuda_execution_provider.h"
#include "core/providers/cuda/cuda_allocator.h"
#include "core/providers/cuda/cuda_stream_handle.h"

namespace onnxruntime {
namespace cuda {
namespace test {

bool TestDeferredRelease() {
  CUDAExecutionProviderInfo info;
  CUDAExecutionProvider ep(info);
  onnxruntime::AllocatorManager allocator_manager;
  ep.RegisterAllocator(allocator_manager);
  AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);
  AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);
  CudaStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);

  const size_t n_bytes = 10 * 1000000;
  const int64_t n_allocs = 64;
  ORT_THROW_IF_ERROR(ep.OnRunStart());
  for (size_t i = 0; i < n_allocs; ++i) {
    auto pinned_buffer = ep.AllocateBufferOnCPUPinned<void>(n_bytes);
    stream.EnqueDeferredCPUBuffer(pinned_buffer.release());
  }

  AllocatorStats stats;
  cpu_pinned_alloc->GetStats(&stats);
  ORT_ENFORCE(stats.num_allocs == n_allocs);
  ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());
  ORT_THROW_IF_ERROR(ep.OnRunEnd(true));
  return true;
}

bool TestDeferredReleaseWithoutArena() {
  CUDAExecutionProviderInfo info;
  CUDAExecutionProvider ep(info);
  onnxruntime::AllocatorManager allocator_manager;

  OrtDevice pinned_device{OrtDevice::CPU, OrtDevice::MemType::CUDA_PINNED, DEFAULT_CPU_ALLOCATOR_DEVICE_ID};
  AllocatorCreationInfo pinned_memory_info(
      [](OrtDevice::DeviceId device_id) { return std::make_unique<CUDAPinnedAllocator>(device_id, CUDA_PINNED); },
      pinned_device.Id(),
      false);

  auto cuda_pinned_alloc = CreateAllocator(pinned_memory_info);
  allocator_manager.InsertAllocator(cuda_pinned_alloc);
  ep.RegisterAllocator(allocator_manager);

  AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);
  AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);
  CudaStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);

  const size_t n_bytes = 10 * 1000000;
  const int64_t n_allocs = 64;
  ORT_THROW_IF_ERROR(ep.OnRunStart());
  for (size_t i = 0; i < n_allocs; ++i) {
    auto pinned_buffer = ep.AllocateBufferOnCPUPinned<void>(n_bytes);
    stream.EnqueDeferredCPUBuffer(pinned_buffer.release());
  }

  ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());
  ORT_THROW_IF_ERROR(ep.OnRunEnd(true));
  return true;
}

}
}
}

#endif
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/framework/random_generator.h"

namespace onnxruntime {
namespace rocm {

#define RANDOM_KERNEL_DECLARE(name)                                                                           \
  template <typename T>                                                                                       \
  void name##KernelImpl(const hipDeviceProp_t& prop, hipStream_t stream, const int64_t N, const float alpha,  \
                        const float beta, PhiloxGenerator& generator, T* Y_data);

RANDOM_KERNEL_DECLARE(RandomNormal)
RANDOM_KERNEL_DECLARE(RandomUniform)

#undef RANDOM_KERNEL_DECLARE

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/framework/random_generator.h"

namespace onnxruntime {
namespace cuda {

#define RANDOM_KERNEL_DECLARE(name)                                                                           \
  template <typename T>                                                                                       \
  void name##KernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N, const float alpha,  \
                        const float beta, PhiloxGenerator& generator, T* Y_data);

RANDOM_KERNEL_DECLARE(RandomNormal)
RANDOM_KERNEL_DECLARE(RandomUniform)

#undef RANDOM_KERNEL_DECLARE

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/rocm_common.h"
#include <hip/hip_runtime.h>

namespace onnxruntime {
namespace rocm {

struct TritonKernelMetaData {
  int num_warps;
  int shared_mem_size;
  hipFunction_t func;
  std::unordered_map<std::string, int> constants;
  std::string name;
};

namespace {

template <typename T>
struct DataTypeToName;

#define DTYPE_TO_STR(type, name)                \
  template <>                                   \
  struct DataTypeToName<type> {                 \
    constexpr static const char* value = name;  \
  };

DTYPE_TO_STR(float, "fp32");
DTYPE_TO_STR(half, "fp16");
DTYPE_TO_STR(double, "fp64");
DTYPE_TO_STR(BFloat16, "bf16");

} // end of namespace

template <typename T>
const std::string GetDataTypeName() {
  return DataTypeToName<T>::value;
}

void LoadOrtTritonKernel();

Status LaunchTritonKernel(hipStream_t stream, std::string fname, int grid0, int grid1, int grid2, void* args, size_t args_size);

const TritonKernelMetaData* GetOrtTritonKernelMetadata(size_t idx);

const std::vector<int>* GetOrtTritonKernelByGroup(std::string group_name);

Status LaunchTritonKernel(hipStream_t stream, size_t idx, int grid0, int grid1, int grid2, void* args, size_t args_size);

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/cuda_common.h"
#include <cuda.h>

namespace onnxruntime {
namespace cuda {

struct TritonKernelMetaData {
  int num_warps;
  int shared_mem_size;
  CUfunction func;
  std::unordered_map<std::string, int> constants;
  std::string name;
};

namespace {

template <typename T>
struct DataTypeToName;

#define DTYPE_TO_STR(type, name)                \
  template <>                                   \
  struct DataTypeToName<type> {                 \
    constexpr static const char* value = name;  \
  };

DTYPE_TO_STR(float, "fp32");
DTYPE_TO_STR(half, "fp16");
DTYPE_TO_STR(double, "fp64");
DTYPE_TO_STR(BFloat16, "bf16");

} // end of namespace

template <typename T>
const std::string GetDataTypeName() {
  return DataTypeToName<T>::value;
}

void LoadOrtTritonKernel();

Status LaunchTritonKernel(cudaStream_t stream, std::string fname, int grid0, int grid1, int grid2, void* args, size_t args_size);

const TritonKernelMetaData* GetOrtTritonKernelMetadata(size_t idx);

const std::vector<int>* GetOrtTritonKernelByGroup(std::string group_name);

Status LaunchTritonKernel(cudaStream_t stream, size_t idx, int grid0, int grid1, int grid2, void* args, size_t args_size);

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "activations.h"
#include "core/framework/op_kernel.h"

using namespace onnxruntime::rocm;

namespace onnxruntime {
namespace contrib {
namespace rocm {

#define REGISTER_ACTIVATION_KERNEL(x, ver, domain, T)            \
  ONNX_OPERATOR_TYPED_KERNEL_EX(                                 \
      x,                                                         \
      domain,                                                    \
      ver,                                                       \
      T,                                                         \
      kRocmExecutionProvider,                                    \
      (*KernelDefBuilder::Create())                              \
          .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
          .MayInplace(0, 0),                                     \
      x<T>);

#define UNARY_ACTIVATION_COMPUTE(x, T)                                                             \
  template <>                                                                                      \
  Status x<T>::ComputeInternal(OpKernelContext* context) const {                                   \
    UnaryElementwisePreparation p;                                                                 \
    ORT_RETURN_IF_ERROR(UnaryElementwise::Prepare(context, &p));                                   \
    Ctx##x func_ctx = MakeFuncCtx();                                                               \
    Impl_##x<typename ToHipType<T>::MappedType>(                                                   \
        Stream(context),                                                                           \
        reinterpret_cast<const typename ToHipType<T>::MappedType*>(p.input_tensor->Data<T>()),     \
        reinterpret_cast<typename ToHipType<T>::MappedType*>(p.output_tensor->MutableData<T>()),   \
        &func_ctx, p.output_tensor->Shape().Size());                                               \
                                                                                                   \
    return Status::OK();                                                                           \
  }

#define UNARY_ACTIVATION_OP_TYPED(name, ver, domain, T) \
  REGISTER_ACTIVATION_KERNEL(name, ver, domain, T)      \
  UNARY_ACTIVATION_COMPUTE(name, T)

#define UNARY_ACTIVATION_OP_HFD(name, ver, domain)        \
  UNARY_ACTIVATION_OP_TYPED(name, ver, domain, MLFloat16) \
  UNARY_ACTIVATION_OP_TYPED(name, ver, domain, float)     \
  UNARY_ACTIVATION_OP_TYPED(name, ver, domain, double)

UNARY_ACTIVATION_OP_HFD(Affine, 1, kOnnxDomain);
UNARY_ACTIVATION_OP_HFD(ParametricSoftplus, 1, kOnnxDomain);
UNARY_ACTIVATION_OP_HFD(ScaledTanh, 1, kOnnxDomain);
UNARY_ACTIVATION_OP_HFD(Gelu, 1, kMSDomain);
UNARY_ACTIVATION_OP_HFD(QuickGelu, 1, kMSDomain);

REGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, MLFloat16)
REGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, float)
REGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, double)

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "activations.h"
#include "core/framework/op_kernel.h"

using namespace onnxruntime::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

#define REGISTER_ACTIVATION_KERNEL(x, ver, domain, T)            \
  ONNX_OPERATOR_TYPED_KERNEL_EX(                                 \
      x,                                                         \
      domain,                                                    \
      ver,                                                       \
      T,                                                         \
      kCudaExecutionProvider,                                    \
      (*KernelDefBuilder::Create())                              \
          .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
          .MayInplace(0, 0),                                     \
      x<T>);

#define UNARY_ACTIVATION_COMPUTE(x, T)                                                             \
  template <>                                                                                      \
  Status x<T>::ComputeInternal(OpKernelContext* context) const {                                   \
    UnaryElementwisePreparation p;                                                                 \
    ORT_RETURN_IF_ERROR(UnaryElementwise::Prepare(context, &p));                                   \
    Ctx##x func_ctx = MakeFuncCtx();                                                               \
    Impl_##x<typename ToCudaType<T>::MappedType>(                                                  \
        Stream(context),                                                                           \
        reinterpret_cast<const typename ToCudaType<T>::MappedType*>(p.input_tensor->Data<T>()),    \
        reinterpret_cast<typename ToCudaType<T>::MappedType*>(p.output_tensor->MutableData<T>()),  \
        &func_ctx, p.output_tensor->Shape().Size());                                               \
                                                                                                   \
    return Status::OK();                                                                           \
  }

#define UNARY_ACTIVATION_OP_TYPED(name, ver, domain, T) \
  REGISTER_ACTIVATION_KERNEL(name, ver, domain, T)      \
  UNARY_ACTIVATION_COMPUTE(name, T)

#define UNARY_ACTIVATION_OP_HFD(name, ver, domain)        \
  UNARY_ACTIVATION_OP_TYPED(name, ver, domain, MLFloat16) \
  UNARY_ACTIVATION_OP_TYPED(name, ver, domain, float)     \
  UNARY_ACTIVATION_OP_TYPED(name, ver, domain, double)

UNARY_ACTIVATION_OP_HFD(Affine, 1, kOnnxDomain);
UNARY_ACTIVATION_OP_HFD(ParametricSoftplus, 1, kOnnxDomain);
UNARY_ACTIVATION_OP_HFD(ScaledTanh, 1, kOnnxDomain);
UNARY_ACTIVATION_OP_HFD(Gelu, 1, kMSDomain);
UNARY_ACTIVATION_OP_HFD(QuickGelu, 1, kMSDomain);

REGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, MLFloat16)
REGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, float)
REGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, double)

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/math/unary_elementwise_ops.h"
#include "core/providers/rocm/math/binary_elementwise_ops.h"
#include "core/providers/rocm/activation/activations.h"
#include "activations_impl.h"

using namespace onnxruntime::rocm;

namespace onnxruntime {
namespace contrib {
namespace rocm {

template <typename T>
class Affine final : public UnaryElementwise {
 public:
  Affine(const OpKernelInfo& info) : UnaryElementwise(info) {
    ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK());
    ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK());
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA_BETA()
  float alpha_;
  float beta_;
};

template <typename T>
class ParametricSoftplus final : public UnaryElementwise {
 public:
  ParametricSoftplus(const OpKernelInfo& info) : UnaryElementwise(info) {
    ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK());
    ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK());
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA_BETA()
  float alpha_;
  float beta_;
};

template <typename T>
class ScaledTanh final : public UnaryElementwise {
 public:
  ScaledTanh(const OpKernelInfo& info) : UnaryElementwise(info) {
    ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK());
    ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK());
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA_BETA()
  float alpha_;
  float beta_;
};

template <typename T>
class Gelu final : public UnaryElementwise {
 public:
  Gelu(const OpKernelInfo& info) : UnaryElementwise(info) {}

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_NULL()
};

template <typename T>
class QuickGelu final : public UnaryElementwise {
 public:
  QuickGelu(const OpKernelInfo& info) : UnaryElementwise(info) {
    alpha_ = info.GetAttrOrDefault<float>("alpha", 1.702f);
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA()
  float alpha_;
};

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/math/unary_elementwise_ops.h"
#include "core/providers/cuda/math/binary_elementwise_ops.h"
#include "core/providers/cuda/activation/activations.h"
#include "activations_impl.h"

using namespace onnxruntime::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
class Affine final : public UnaryElementwise {
 public:
  Affine(const OpKernelInfo& info) : UnaryElementwise(info) {
    ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK());
    ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK());
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA_BETA()
  float alpha_;
  float beta_;
};

template <typename T>
class ParametricSoftplus final : public UnaryElementwise {
 public:
  ParametricSoftplus(const OpKernelInfo& info) : UnaryElementwise(info) {
    ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK());
    ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK());
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA_BETA()
  float alpha_;
  float beta_;
};

template <typename T>
class ScaledTanh final : public UnaryElementwise {
 public:
  ScaledTanh(const OpKernelInfo& info) : UnaryElementwise(info) {
    ORT_ENFORCE(info.GetAttr("alpha", &alpha_).IsOK());
    ORT_ENFORCE(info.GetAttr("beta", &beta_).IsOK());
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA_BETA()
  float alpha_;
  float beta_;
};

template <typename T>
class Gelu final : public UnaryElementwise {
 public:
  Gelu(const OpKernelInfo& info) : UnaryElementwise(info) {}

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_NULL()
};

template <typename T>
class QuickGelu final : public UnaryElementwise {
 public:
  QuickGelu(const OpKernelInfo& info) : UnaryElementwise(info) {
    alpha_ = info.GetAttrOrDefault<float>("alpha", 1.702f);
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  MAKE_FUNC_CTX_ALPHA()
  float alpha_;
};

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
#include <hip/hip_runtime.h>
#include "activations_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/cu_inc/unary_elementwise_impl.cuh"

using namespace onnxruntime::rocm;

namespace onnxruntime {
namespace contrib {
namespace rocm {

template <typename T>
struct OP_Affine : public CtxAffine {
  __device__ __inline__ T operator()(const T& a) const {
    return a * (T)alpha + (T)beta;
  }
};

template <typename T>
struct OP_ParametricSoftplus : public CtxParametricSoftplus {
  __device__ __inline__ T operator()(const T& a) const {
    if (a > (T)0)
      return (T)alpha * (a * (T)beta + _Log(_Exp(-a * (T)beta) + (T)1));
    else
      return (T)alpha * _Log(_Exp(a * (T)beta) + (T)1);
  }
};

template <typename T>
struct OP_ScaledTanh : public CtxScaledTanh {
  __device__ __inline__ T operator()(const T& a) const {
    return (T)alpha * _Tanh(a * (T)beta);
  }
};

template <typename T>
struct OP_Gelu : public CtxGelu {
  __device__ __inline__ T operator()(const T& a) const {
    return _Gelu(a);
  }
};

template <>
struct OP_Gelu<half> : public CtxGelu {
  __device__ __inline__ half operator()(const half& a) const {
    return static_cast<half>(_Gelu(static_cast<float>(a)));
  }
};

template <typename T>
struct OP_QuickGelu : public CtxQuickGelu {
  __device__ __inline__ T operator()(const T& a) const {
    T v = a * static_cast<T>(alpha);
    T one = static_cast<T>(1.f);
    T zero = static_cast<T>(0.f);
    T sigmoid = v >= zero ? one / (one + _Exp(-v)) : one - one / (one + _Exp(v));
    return a * sigmoid;
  }
};

#define UNARY_ACTIVATION_IMPL(name) UNARY_ACTIVATION_IMPL_DECLARATION(name) { UnaryElementWiseImpl(stream, input_data, output_data, *reinterpret_cast<const OP_##name<T>*>(func_ctx), count); }

#define SPECIALIZED_UNARY_ACTIVATION_IMPL(name, T) template void Impl_##name<T>(hipStream_t stream, const T* input_data, T* output_data, const Ctx##name* func_ctx, size_t count);

#define SPECIALIZED_UNARY_ACTIVATIONL_HFD(name) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, half) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, float) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, double)

#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL(name); SPECIALIZED_UNARY_ACTIVATIONL_HFD(name)

UNARY_CONTRIB_ACTIVATION_OPS()
#undef UNARY_ACTIVATION_OP_NAME

}
}
}
###
#include <cuda_runtime.h>
#include "activations_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/unary_elementwise_impl.cuh"

using namespace onnxruntime::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
struct OP_Affine : public CtxAffine {
  __device__ __inline__ T operator()(const T& a) const {
    return a * (T)alpha + (T)beta;
  }
};

template <typename T>
struct OP_ParametricSoftplus : public CtxParametricSoftplus {
  __device__ __inline__ T operator()(const T& a) const {
    if (a > (T)0)
      return (T)alpha * (a * (T)beta + _Log(_Exp(-a * (T)beta) + (T)1));
    else
      return (T)alpha * _Log(_Exp(a * (T)beta) + (T)1);
  }
};

template <typename T>
struct OP_ScaledTanh : public CtxScaledTanh {
  __device__ __inline__ T operator()(const T& a) const {
    return (T)alpha * _Tanh(a * (T)beta);
  }
};

template <typename T>
struct OP_Gelu : public CtxGelu {
  __device__ __inline__ T operator()(const T& a) const {
    return _Gelu(a);
  }
};

template <>
struct OP_Gelu<half> : public CtxGelu {
  __device__ __inline__ half operator()(const half& a) const {
    return static_cast<half>(_Gelu(static_cast<float>(a)));
  }
};

template <typename T>
struct OP_QuickGelu : public CtxQuickGelu {
  __device__ __inline__ T operator()(const T& a) const {
    T v = a * static_cast<T>(alpha);
    T one = static_cast<T>(1.f);
    T zero = static_cast<T>(0.f);
    T sigmoid = v >= zero ? one / (one + _Exp(-v)) : one - one / (one + _Exp(v));
    return a * sigmoid;
  }
};

#define UNARY_ACTIVATION_IMPL(name) UNARY_ACTIVATION_IMPL_DECLARATION(name) { UnaryElementWiseImpl(stream, input_data, output_data, *reinterpret_cast<const OP_##name<T>*>(func_ctx), count); }

#define SPECIALIZED_UNARY_ACTIVATION_IMPL(name, T) template void Impl_##name<T>(cudaStream_t stream, const T* input_data, T* output_data, const Ctx##name* func_ctx, size_t count);

#define SPECIALIZED_UNARY_ACTIVATIONL_HFD(name) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, half) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, float) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, double)

#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL(name); SPECIALIZED_UNARY_ACTIVATIONL_HFD(name)

UNARY_CONTRIB_ACTIVATION_OPS()
#undef UNARY_ACTIVATION_OP_NAME

}
}
}
###
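Editorial note on the pair above: the OP_QuickGelu functor keeps exp() from overflowing by evaluating the sigmoid one way for non-negative inputs and the mirrored way for negative inputs. The following is a minimal, standalone host-side C++ sketch of that same trick, separate from the ONNX Runtime sources; the function name quick_gelu_ref and the hard-coded default alpha = 1.702f are illustrative assumptions, not part of the dataset entry.

// Standalone reference sketch (not ONNX Runtime code): numerically stable QuickGelu on the CPU.
#include <cmath>
#include <cstdio>

// Hypothetical helper; mirrors the branch inside OP_QuickGelu above.
float quick_gelu_ref(float a, float alpha = 1.702f) {
  float v = a * alpha;
  // For v >= 0 use 1/(1+exp(-v)); for v < 0 use 1 - 1/(1+exp(v)) so exp() never sees a large positive argument.
  float sigmoid = v >= 0.0f ? 1.0f / (1.0f + std::exp(-v))
                            : 1.0f - 1.0f / (1.0f + std::exp(v));
  return a * sigmoid;
}

int main() {
  for (float x : {-3.0f, -0.5f, 0.0f, 0.5f, 3.0f}) {
    std::printf("quick_gelu(%g) = %g\n", x, quick_gelu_ref(x));
  }
  return 0;
}

The two-branch form returns mathematically identical values to the single-branch sigmoid; it only changes which exponential is computed, which is why both the HIP and CUDA functors above use it unchanged.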
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/providers/rocm/activation/activations_impl.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

typedef onnxruntime::rocm::CtxAlphaBeta CtxAffine;
typedef onnxruntime::rocm::CtxAlphaBeta CtxParametricSoftplus;
typedef onnxruntime::rocm::CtxAlphaBeta CtxScaledTanh;
typedef onnxruntime::rocm::CtxNull CtxGelu;
typedef onnxruntime::rocm::CtxAlpha CtxQuickGelu;

#define UNARY_CONTRIB_ACTIVATION_OPS()         \
  UNARY_ACTIVATION_OP_NAME(ScaledTanh)         \
  UNARY_ACTIVATION_OP_NAME(Affine)             \
  UNARY_ACTIVATION_OP_NAME(ParametricSoftplus) \
  UNARY_ACTIVATION_OP_NAME(Gelu)               \
  UNARY_ACTIVATION_OP_NAME(QuickGelu)

#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name);

UNARY_CONTRIB_ACTIVATION_OPS()
#undef UNARY_ACTIVATION_OP_NAME

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/providers/cuda/activation/activations_impl.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

typedef onnxruntime::cuda::CtxAlphaBeta CtxAffine;
typedef onnxruntime::cuda::CtxAlphaBeta CtxParametricSoftplus;
typedef onnxruntime::cuda::CtxAlphaBeta CtxScaledTanh;
typedef onnxruntime::cuda::CtxNull CtxGelu;
typedef onnxruntime::cuda::CtxAlpha CtxQuickGelu;

#define UNARY_CONTRIB_ACTIVATION_OPS()         \
  UNARY_ACTIVATION_OP_NAME(ScaledTanh)         \
  UNARY_ACTIVATION_OP_NAME(Affine)             \
  UNARY_ACTIVATION_OP_NAME(ParametricSoftplus) \
  UNARY_ACTIVATION_OP_NAME(Gelu)               \
  UNARY_ACTIVATION_OP_NAME(QuickGelu)

#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name);

UNARY_CONTRIB_ACTIVATION_OPS()
#undef UNARY_ACTIVATION_OP_NAME

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "contrib_ops/cpu/aten_ops/aten_op.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

ONNX_OPERATOR_KERNEL_EX(
    ATen,
    kPytorchAtenDomain,
    1,
    kRocmExecutionProvider,
    (*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::AllTensorAndSequenceTensorTypes()),
    onnxruntime::contrib::ATen);

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "contrib_ops/cpu/aten_ops/aten_op.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

ONNX_OPERATOR_KERNEL_EX(
    ATen,
    kPytorchAtenDomain,
    1,
    kCudaExecutionProvider,
    (*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::AllTensorAndSequenceTensorTypes()),
    onnxruntime::contrib::ATen);

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
#pragma once
#include "core/providers/rocm/shared_inc/rocm_utils.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

template <typename T>
void LaunchAddBiasTranspose(
    hipStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
    const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
    const T* input, const T* biases, T* output,
    bool enable_half4, const int v_head_size, T* qkv_add_bias = nullptr,
    int total_matrix_count = -1, bool do_rotary = false, int original_past_sequence_length = 0);

template <typename T>
void LaunchAddBiasTransposeTrt(
    hipStream_t stream, const int max_threads_per_block,
    const int batch_size, const int sequence_length, const int num_heads, const int head_size,
    const T* biases, const T* query, const T* key, const T* value, T* output,
    bool is_cross_attention, int kv_sequence_length = -1);

template <typename T>
void LaunchAddBias(
    hipStream_t stream, const int max_threads_per_block,
    const int batch_size, const int sequence_length, const int kv_sequence_length,
    const int num_heads, const int head_size, const int v_head_size,
    const T* biases, const T* query, const T* key, const T* value, T* q, T* k, T* v);

}
}
}
###
#pragma once
#include "core/providers/cuda/shared_inc/cuda_utils.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
void LaunchAddBiasTranspose(
    cudaStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
    const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
    const T* input, const T* biases, T* output,
    bool enable_half4, const int v_head_size, T* qkv_add_bias = nullptr,
    int total_matrix_count = -1, bool do_rotary = false, int original_past_sequence_length = 0);

template <typename T>
void LaunchAddBiasTransposeTrt(
    cudaStream_t stream, const int max_threads_per_block,
    const int batch_size, const int sequence_length, const int num_heads, const int head_size,
    const T* biases, const T* query, const T* key, const T* value, T* output,
    bool is_cross_attention, int kv_sequence_length = -1);

template <typename T>
void LaunchAddBias(
    cudaStream_t stream, const int max_threads_per_block,
    const int batch_size, const int sequence_length, const int kv_sequence_length,
    const int num_heads, const int head_size, const int v_head_size,
    const T* biases, const T* query, const T* key, const T* value, T* q, T* k, T* v);

}
}
}
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include <hip/hip_fp16.h>
#include <rocblas/rocblas.h>

namespace onnxruntime {
namespace contrib {
namespace rocm {

// Build token indice for non-padding tokens and padding tokens.
void LaunchGetTokenOffset(int* token_count_buffer,
                          int* token_offset,
                          int* cumulated_token_count,
                          const int* sequence_token_count,
                          const int batch_size,
                          const int sequence_length,
                          hipStream_t stream);

// Remove paddings from input.
template <typename T>
void LaunchRemovePadding(
    T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,
    hipStream_t stream);

// Rebuild paddings to restore output shape.
template <typename T>
void LaunchRestorePadding(
    T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,
    const int batch_size, const int sequence_length,
    hipStream_t stream);

// Padding offset for TensorRT fused attention kernel
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
                             const int* mask_index,
                             const int batch_size,
                             hipStream_t stream);

void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
                             const int* mask_index,
                             const int batch_size,
                             const int sequence_length,
                             hipStream_t stream);

void LaunchTrtSequenceOffset2d(int* trt_mha_padding_offset,
                               const int* mask_index,
                               const int batch_size,
                               const int sequence_length,
                               hipStream_t stream);

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include <cuda_fp16.h>
#include <cublas_v2.h>

namespace onnxruntime {
namespace contrib {
namespace cuda {

// Build token indice for non-padding tokens and padding tokens.
void LaunchGetTokenOffset(int* token_count_buffer,
                          int* token_offset,
                          int* cumulated_token_count,
                          const int* sequence_token_count,
                          const int batch_size,
                          const int sequence_length,
                          cudaStream_t stream);

// Remove paddings from input.
template <typename T>
void LaunchRemovePadding(
    T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,
    cudaStream_t stream);

// Rebuild paddings to restore output shape.
template <typename T>
void LaunchRestorePadding(
    T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,
    const int batch_size, const int sequence_length,
    cudaStream_t stream);

// Padding offset for TensorRT fused attention kernel
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
                             const int* mask_index,
                             const int batch_size,
                             cudaStream_t stream);

void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
                             const int* mask_index,
                             const int batch_size,
                             const int sequence_length,
                             cudaStream_t stream);

void LaunchTrtSequenceOffset2d(int* trt_mha_padding_offset,
                               const int* mask_index,
                               const int batch_size,
                               const int sequence_length,
                               cudaStream_t stream);

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

using namespace onnxruntime::rocm;

template <typename T>
class DecoderAttention final : public RocmKernel {
 public:
  DecoderAttention(const OpKernelInfo& info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  int num_heads_;
  float mask_filter_value_;
};

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

using namespace onnxruntime::cuda;

template <typename T>
class DecoderAttention final : public CudaKernel {
 public:
  DecoderAttention(const OpKernelInfo& info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  int num_heads_;
  float mask_filter_value_;
};

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
#include "core/providers/rocm/rocm_common.h" #include "contrib_ops/cpu/bert/embed_layer_norm_helper.h" #include "embed_layer_norm.h" #include "embed_layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace rocm { #define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( EmbedLayerNormalization, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), EmbedLayerNorm<T>); REGISTER_KERNEL_TYPED(float) REGISTER_KERNEL_TYPED(MLFloat16) using namespace ONNX_NAMESPACE; template <typename T> EmbedLayerNorm<T>::EmbedLayerNorm(const OpKernelInfo& op_kernel_info) : RocmKernel(op_kernel_info) { ORT_ENFORCE(op_kernel_info.GetAttr<float>("epsilon", &epsilon_).IsOK()); ORT_ENFORCE(epsilon_ >= 0); } template <typename T> Status EmbedLayerNorm<T>::ComputeInternal(OpKernelContext* context) const { ORT_RETURN_IF_ERROR(embed_layer_norm::CheckInputs(context)); const Tensor* input_ids = context->Input<Tensor>(0); const Tensor* segment_ids = context->Input<Tensor>(1); const Tensor* word_embedding = context->Input<Tensor>(2); const Tensor* position_embedding = context->Input<Tensor>(3); const Tensor* segment_embedding = context->Input<Tensor>(4); const Tensor* gamma = context->Input<Tensor>(5); const Tensor* beta = context->Input<Tensor>(6); const Tensor* mask = context->Input<Tensor>(7); const Tensor* position_ids = context->Input<Tensor>(8); const auto& input_dims = input_ids->Shape().GetDims(); int64_t hidden_size = word_embedding->Shape()[1]; TensorShape output_shape({input_dims[0], input_dims[1], hidden_size}); Tensor* output = context->Output(0, output_shape); TensorShape mask_index_shape({input_dims[0]}); Tensor* mask_index = context->Output(1, mask_index_shape); Tensor* embedding_sum = context->Output(2, output_shape); int batch_size = static_cast<int>(input_dims[0]); int sequence_length = static_cast<int>(input_dims[1]); size_t element_size = sizeof(T); const bool broadcast_position_ids = (nullptr != position_ids && position_ids->Shape()[0] == 1); return LaunchEmbedLayerNormKernel( Stream(context), output->MutableData<T>(), nullptr == mask_index ? nullptr : mask_index->MutableData<int32_t>(), input_ids->Data<int32_t>(), nullptr == segment_ids ? nullptr : segment_ids->Data<int32_t>(), nullptr == mask ? nullptr : mask->Data<int32_t>(), gamma->Data<T>(), beta->Data<T>(), word_embedding->Data<T>(), position_embedding->Data<T>(), nullptr == segment_embedding ? nullptr : segment_embedding->Data<T>(), epsilon_, static_cast<int>(hidden_size), batch_size, sequence_length, element_size, embedding_sum == nullptr ? nullptr : embedding_sum->MutableData<T>(), position_ids == nullptr ? nullptr : position_ids->Data<int32_t>(), broadcast_position_ids); } } } } ###
#include "core/providers/cuda/cuda_common.h" #include "contrib_ops/cpu/bert/embed_layer_norm_helper.h" #include "embed_layer_norm.h" #include "embed_layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { #define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( EmbedLayerNormalization, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), EmbedLayerNorm<T>); REGISTER_KERNEL_TYPED(float) REGISTER_KERNEL_TYPED(MLFloat16) using namespace ONNX_NAMESPACE; template <typename T> EmbedLayerNorm<T>::EmbedLayerNorm(const OpKernelInfo& op_kernel_info) : CudaKernel(op_kernel_info) { ORT_ENFORCE(op_kernel_info.GetAttr<float>("epsilon", &epsilon_).IsOK()); ORT_ENFORCE(epsilon_ >= 0); } template <typename T> Status EmbedLayerNorm<T>::ComputeInternal(OpKernelContext* context) const { ORT_RETURN_IF_ERROR(embed_layer_norm::CheckInputs(context)); const Tensor* input_ids = context->Input<Tensor>(0); const Tensor* segment_ids = context->Input<Tensor>(1); const Tensor* word_embedding = context->Input<Tensor>(2); const Tensor* position_embedding = context->Input<Tensor>(3); const Tensor* segment_embedding = context->Input<Tensor>(4); const Tensor* gamma = context->Input<Tensor>(5); const Tensor* beta = context->Input<Tensor>(6); const Tensor* mask = context->Input<Tensor>(7); const Tensor* position_ids = context->Input<Tensor>(8); const auto& input_dims = input_ids->Shape().GetDims(); int64_t hidden_size = word_embedding->Shape()[1]; TensorShape output_shape({input_dims[0], input_dims[1], hidden_size}); Tensor* output = context->Output(0, output_shape); TensorShape mask_index_shape({input_dims[0]}); Tensor* mask_index = context->Output(1, mask_index_shape); Tensor* embedding_sum = context->Output(2, output_shape); int batch_size = static_cast<int>(input_dims[0]); int sequence_length = static_cast<int>(input_dims[1]); size_t element_size = sizeof(T); const bool broadcast_position_ids = (nullptr != position_ids && position_ids->Shape()[0] == 1); return LaunchEmbedLayerNormKernel( Stream(context), output->MutableData<T>(), nullptr == mask_index ? nullptr : mask_index->MutableData<int32_t>(), input_ids->Data<int32_t>(), nullptr == segment_ids ? nullptr : segment_ids->Data<int32_t>(), nullptr == mask ? nullptr : mask->Data<int32_t>(), gamma->Data<T>(), beta->Data<T>(), word_embedding->Data<T>(), position_embedding->Data<T>(), nullptr == segment_embedding ? nullptr : segment_embedding->Data<T>(), epsilon_, static_cast<int>(hidden_size), batch_size, sequence_length, element_size, embedding_sum == nullptr ? nullptr : embedding_sum->MutableData<T>(), position_ids == nullptr ? nullptr : position_ids->Data<int32_t>(), broadcast_position_ids); } } } } ###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace rocm {

class Range final : public RocmKernel {
 public:
  explicit Range(const OpKernelInfo& info) : RocmKernel(info) {}

  Status ComputeInternal(OpKernelContext* context) const override;
};

} // namespace rocm
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace cuda {

class Range final : public CudaKernel {
 public:
  explicit Range(const OpKernelInfo& info) : CudaKernel(info) {}

  Status ComputeInternal(OpKernelContext* context) const override;
};

} // namespace cuda
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

using namespace onnxruntime::rocm;

template <typename T>
class EmbedLayerNorm final : public RocmKernel {
 public:
  EmbedLayerNorm(const OpKernelInfo& op_kernel_info);
  Status ComputeInternal(OpKernelContext* ctx) const override;

 private:
  float epsilon_;
};

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once
#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

using namespace onnxruntime::cuda;

template <typename T>
class EmbedLayerNorm final : public CudaKernel {
 public:
  EmbedLayerNorm(const OpKernelInfo& op_kernel_info);
  Status ComputeInternal(OpKernelContext* ctx) const override;

 private:
  float epsilon_;
};

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

Status LaunchEmbedLayerNormKernel(hipStream_t stream,
                                  void* output,                    // output tensor
                                  void* mask_index,                // output mask index
                                  const int* input_ids,            // input word IDs
                                  const int* segment_ids,          // input segment IDs
                                  const int* input_mask,           // input mask
                                  const void* gamma,               // weight for layer normalization
                                  const void* beta,                // bias for layer normalization
                                  const void* word_embedding,      // weights for word embeddings
                                  const void* position_embedding,  // weights for position embeddings
                                  const void* segment_embedding,   // weights for segment (like sentence) embeddings
                                  float epsilon,                   // epsilon for layer normalization
                                  const int hidden_size,           // hidden size (that is head_size * num_heads)
                                  int batch_size,                  // batch size
                                  int sequence_length,             // sequence length
                                  const size_t element_size,       // size of output element: 2 for half, 4 for float.
                                  void* embedding_sum,             // Optional output of sum of embeddings
                                  const int* position_ids,         // Optional input of position ids
                                  const bool broadcast_position_ids);  // Whether to broadcast position ids

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

Status LaunchEmbedLayerNormKernel(cudaStream_t stream,
                                  void* output,                    // output tensor
                                  void* mask_index,                // output mask index
                                  const int* input_ids,            // input word IDs
                                  const int* segment_ids,          // input segment IDs
                                  const int* input_mask,           // input mask
                                  const void* gamma,               // weight for layer normalization
                                  const void* beta,                // bias for layer normalization
                                  const void* word_embedding,      // weights for word embeddings
                                  const void* position_embedding,  // weights for position embeddings
                                  const void* segment_embedding,   // weights for segment (like sentence) embeddings
                                  float epsilon,                   // epsilon for layer normalization
                                  const int hidden_size,           // hidden size (that is head_size * num_heads)
                                  int batch_size,                  // batch size
                                  int sequence_length,             // sequence length
                                  const size_t element_size,       // size of output element: 2 for half, 4 for float.
                                  void* embedding_sum,             // Optional output of sum of embeddings
                                  const int* position_ids,         // Optional input of position ids
                                  const bool broadcast_position_ids);  // Whether to broadcast position ids

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "contrib_ops/cpu/bert/longformer_attention_base.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

using namespace onnxruntime::rocm;

template <typename T>
class LongformerAttention final : public RocmKernel, public LongformerAttentionBase {
 public:
  LongformerAttention(const OpKernelInfo& info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  bool use_compact_memory_;
  bool use_half4_;
};

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "contrib_ops/cpu/bert/longformer_attention_base.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

using namespace onnxruntime::cuda;

template <typename T>
class LongformerAttention final : public CudaKernel, public LongformerAttentionBase {
 public:
  LongformerAttention(const OpKernelInfo& info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  bool use_compact_memory_;
  bool use_half4_;
};

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/shared_inc/rocm_utils.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

size_t GetPinnedBufferSize(
    size_t batch_size);

size_t GetLongformerAttentionWorkspaceSize(
    size_t element_size,
    size_t batch_size,
    size_t num_heads,
    size_t head_size,
    size_t sequence_length,
    size_t max_num_global,
    size_t window,
    bool disable_compact_memory);

Status LaunchLongformerAttentionKernel(
    const hipDeviceProp_t& device_prop,  // Device Properties
    rocblas_handle rocblas,              // Rocblas handle
    hipStream_t stream,                  // ROCM stream
    const void* input,                   // Input tensor
    const void* bias,                    // Bias tensor
    const void* attention_mask,          // Attention mask with shape (B, S)
    const void* global_input,            // Global attention input, or nullptr when max_num_global == 0.
    const void* global_bias,             // Global bias tensor
    const int* global_attention,         // Global attention flags with shape (B, S)
    const int* global_index,             // Global index
    const int* batch_global_num,         // Number of global tokens per batch. It is in device memory.
    void* pinned_buffer,                 // Pinned memory: copy of batch_global_num, and a buffer to copy to scratch2.
    void* workspace,                     // Temporary buffer
    void* output,                        // Output tensor
    int batch_size,                      // Batch size (B)
    int sequence_length,                 // Sequence length (S)
    int num_heads,                       // Number of attention heads (N)
    int head_size,                       // Hidden layer size per head (H)
    int window,                          // One sided attention window (W)
    int max_num_global,                  // Maximum number of global tokens (G)
    const size_t element_size,           // Element size of input tensor,
    bool disable_compact_memory,         // Disable compact memory kernel
    bool use_merged_qkv_weights,
    bool use_half4);

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/shared_inc/cuda_utils.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

size_t GetPinnedBufferSize(
    size_t batch_size);

size_t GetLongformerAttentionWorkspaceSize(
    size_t element_size,
    size_t batch_size,
    size_t num_heads,
    size_t head_size,
    size_t sequence_length,
    size_t max_num_global,
    size_t window,
    bool disable_compact_memory);

Status LaunchLongformerAttentionKernel(
    const cudaDeviceProp& device_prop,  // Device Properties
    cublasHandle_t cublas,              // Cublas handle
    cudaStream_t stream,                // CUDA stream
    const void* input,                  // Input tensor
    const void* bias,                   // Bias tensor
    const void* attention_mask,         // Attention mask with shape (B, S)
    const void* global_input,           // Global attention input, or nullptr when max_num_global == 0.
    const void* global_bias,            // Global bias tensor
    const int* global_attention,        // Global attention flags with shape (B, S)
    const int* global_index,            // Global index
    const int* batch_global_num,        // Number of global tokens per batch. It is in device memory.
    void* pinned_buffer,                // Pinned memory: copy of batch_global_num, and a buffer to copy to scratch2.
    void* workspace,                    // Temporary buffer
    void* output,                       // Output tensor
    int batch_size,                     // Batch size (B)
    int sequence_length,                // Sequence length (S)
    int num_heads,                      // Number of attention heads (N)
    int head_size,                      // Hidden layer size per head (H)
    int window,                         // One sided attention window (W)
    int max_num_global,                 // Maximum number of global tokens (G)
    const size_t element_size,          // Element size of input tensor,
    bool disable_compact_memory,        // Disable compact memory kernel
    bool use_merged_qkv_weights,
    bool use_half4);

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
/*
 Copyright (c) NVIDIA Corporation and Microsoft Corporation

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

#pragma once

#include "core/common/common.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

// Launch the softmax kernels that does not use compact memory.
Status LaunchLongformerSoftmaxSimpleKernel(
    hipStream_t stream,
    rocblas_handle rocblas,
    void* workspace,              // softmax space
    const void* q,                // transposed Q with shape (B, N, S, H)
    const void* k,                // transposed K with shape (B, N, S, H)
    const void* v,                // transposed V with shape (B, N, S, H)
    const void* attention_mask,   // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 or torch.finfo(dtype).min masked.
    const void* global_q,         // Q for global tokens with shape (B, N, S, H)
    const void* global_k,         // K for global tokens with shape (B, N, S, H)
    const void* global_v,         // V for global tokens with shape (B, N, S, H)
    const int* global_attention,  // global attention flags with shape (B, S), with value 0 for local and 1 for global.
    const int* global_index,      // Global index with shape (B, S)
    const int* batch_global_num,  // Number of global tokens per batch with shape (B, 1)
    void* pinned_buffer,          // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
    void* output,                 // output with shape (B, N, S, H)
    float scaler,                 // scalar
    int batch_size,               // batch size
    int sequence_length,          // sequence length
    int num_heads,                // number of heads
    int head_size,                // hidden size per head
    int attention_window,         // one sided windows size
    size_t element_size);

} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
###
/*
 Copyright (c) NVIDIA Corporation and Microsoft Corporation

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

#pragma once

#include "core/common/common.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

// Launch the softmax kernels that does not use compact memory.
Status LaunchLongformerSoftmaxSimpleKernel(
    cudaStream_t stream,
    cublasHandle_t cublas,
    void* workspace,              // softmax space
    const void* q,                // transposed Q with shape (B, N, S, H)
    const void* k,                // transposed K with shape (B, N, S, H)
    const void* v,                // transposed V with shape (B, N, S, H)
    const void* attention_mask,   // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 or torch.finfo(dtype).min masked.
    const void* global_q,         // Q for global tokens with shape (B, N, S, H)
    const void* global_k,         // K for global tokens with shape (B, N, S, H)
    const void* global_v,         // V for global tokens with shape (B, N, S, H)
    const int* global_attention,  // global attention flags with shape (B, S), with value 0 for local and 1 for global.
    const int* global_index,      // Global index with shape (B, S)
    const int* batch_global_num,  // Number of global tokens per batch with shape (B, 1)
    void* pinned_buffer,          // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
    void* output,                 // output with shape (B, N, S, H)
    float scaler,                 // scalar
    int batch_size,               // batch size
    int sequence_length,          // sequence length
    int num_heads,                // number of heads
    int head_size,                // hidden size per head
    int attention_window,         // one sided windows size
    size_t element_size);

} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
###
#include "hip/hip_runtime.h" /* Copyright (c) NVIDIA Corporation and Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hipcub/hipcub.hpp> #include <hipcub/device/device_partition.hpp> #include "core/providers/rocm/rocm_common.h" #include "core/providers/rocm/cu_inc/common.cuh" #include "longformer_global_impl.h" using namespace onnxruntime::rocm; using namespace hipcub; namespace onnxruntime { namespace contrib { namespace rocm { size_t GetGlobalScratchSize(int sequence_length) { // Global Index scratch layout: // [sequence_index: int S][tmp_storage: int 1024x1] return sizeof(int) * (sequence_length + 1024); } __global__ void InitSequenceIndexKernel(int* sequence_index, int sequence_length) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < sequence_length; i += blockDim.x) { sequence_index[i] = i; } } Status BuildGlobalIndex( const hipDeviceProp_t& device_prop, hipStream_t stream, const int* global_attention, int batch_size, int sequence_length, int* global_index, int* batch_global_num, void* scratch, size_t scratch_size) { int* sequence_index = (int*)scratch; int* tmp_storage = sequence_index + sequence_length; const int threads = device_prop.maxThreadsPerBlock; int blocks = CeilDiv(sequence_length, threads); InitSequenceIndexKernel<<<blocks, threads, 0, stream>>>(sequence_index, sequence_length); // Determine temporary device storage size. // For int* inputs/outputs, it need 767 bytes. We reserved 1024*4 bytes, which shall be enough. size_t temp_storage_bytes = 0; HIP_RETURN_IF_ERROR(hipcub::DevicePartition::Flagged( NULL, temp_storage_bytes, sequence_index, global_attention, global_index, batch_global_num, sequence_length, stream)); if (temp_storage_bytes + sizeof(int) * sequence_length > scratch_size) { ORT_THROW("LongformerAttention scratch space is not large enough. Temp storage bytes are", temp_storage_bytes); } // Find the global attention indices and number of global attention tokens for (int i = 0; i < batch_size; ++i) { HIP_RETURN_IF_ERROR(hipcub::DevicePartition::Flagged( reinterpret_cast<void*>(tmp_storage), temp_storage_bytes, sequence_index, global_attention + i * sequence_length, global_index + i * sequence_length, batch_global_num + i, sequence_length, stream)); } return Status::OK(); } } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
/* Copyright (c) NVIDIA Corporation and Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cub/cub.cuh> #include <cub/device/device_partition.cuh> #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "longformer_global_impl.h" using namespace onnxruntime::cuda; using namespace cub; namespace onnxruntime { namespace contrib { namespace cuda { size_t GetGlobalScratchSize(int sequence_length) { // Global Index scratch layout: // [sequence_index: int S][tmp_storage: int 1024x1] return sizeof(int) * (sequence_length + 1024); } __global__ void InitSequenceIndexKernel(int* sequence_index, int sequence_length) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < sequence_length; i += blockDim.x) { sequence_index[i] = i; } } Status BuildGlobalIndex( const cudaDeviceProp& device_prop, cudaStream_t stream, const int* global_attention, int batch_size, int sequence_length, int* global_index, int* batch_global_num, void* scratch, size_t scratch_size) { int* sequence_index = (int*)scratch; int* tmp_storage = sequence_index + sequence_length; const int threads = device_prop.maxThreadsPerBlock; int blocks = CeilDiv(sequence_length, threads); InitSequenceIndexKernel<<<blocks, threads, 0, stream>>>(sequence_index, sequence_length); // Determine temporary device storage size. // For int* inputs/outputs, it needs 767 bytes. We reserved 1024*4 bytes, which should be enough. size_t temp_storage_bytes = 0; CUDA_RETURN_IF_ERROR(cub::DevicePartition::Flagged( NULL, temp_storage_bytes, sequence_index, global_attention, global_index, batch_global_num, sequence_length, stream)); if (temp_storage_bytes + sizeof(int) * sequence_length > scratch_size) { ORT_THROW("LongformerAttention scratch space is not large enough. Temp storage bytes are", temp_storage_bytes); } // Find the global attention indices and number of global attention tokens for (int i = 0; i < batch_size; ++i) { CUDA_RETURN_IF_ERROR(cub::DevicePartition::Flagged( reinterpret_cast<void*>(tmp_storage), temp_storage_bytes, sequence_index, global_attention + i * sequence_length, global_index + i * sequence_length, batch_global_num + i, sequence_length, stream)); } return Status::OK(); } } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once namespace onnxruntime { namespace contrib { namespace rocm { // Size of global Index scratch in bytes. size_t GetGlobalScratchSize(int sequence_length); // Find the global attention indices and number of global attention tokens Status BuildGlobalIndex( const hipDeviceProp_t& device_prop, hipStream_t stream, const int* global_attention, int batch_size, int sequence_length, int* global_index, int* batch_global_num, void* scratch, size_t scratch_size); } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once namespace onnxruntime { namespace contrib { namespace cuda { // Size of global Index scratch in bytes. size_t GetGlobalScratchSize(int sequence_length); // Find the global attention indices and number of global attention tokens Status BuildGlobalIndex( const cudaDeviceProp& device_prop, cudaStream_t stream, const int* global_attention, int batch_size, int sequence_length, int* global_index, int* batch_global_num, void* scratch, size_t scratch_size); } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/rocm/rocm_common.h" #include "ngram_repeat_block.h" #include "ngram_repeat_block_impl.h" namespace onnxruntime { namespace contrib { namespace rocm { ONNX_OPERATOR_KERNEL_EX( NGramRepeatBlock, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("Tid", DataTypeImpl::GetTensorType<int64_t>()) .TypeConstraint("T", DataTypeImpl::GetTensorType<float>()), NGramRepeatBlock); using namespace ONNX_NAMESPACE; NGramRepeatBlock::NGramRepeatBlock(const OpKernelInfo& info) : RocmKernel(info) { ORT_ENFORCE(info.GetAttr<int64_t>("ngram_size", &ngram_size_).IsOK()); ORT_ENFORCE(ngram_size_ > 0); } Status NGramRepeatBlock::ComputeInternal(OpKernelContext* context) const { const Tensor* input_ids = context->Input<Tensor>(0); const Tensor* scores = context->Input<Tensor>(1); Tensor* output = context->Output(0, scores->Shape()); const auto* scores_source = static_cast<const float*>(scores->DataRaw()); auto* scores_target = static_cast<float*>(output->MutableDataRaw()); if (scores_source != scores_target) { HIP_RETURN_IF_ERROR(hipMemcpyAsync(scores_target, scores_source, scores->Shape().Size() * sizeof(float), hipMemcpyDeviceToDevice, Stream(context))); } const auto& input_ids_dims = input_ids->Shape().GetDims(); const auto& scores_dims = scores->Shape().GetDims(); ORT_ENFORCE(input_ids_dims.size() == 2); ORT_ENFORCE(scores_dims.size() == 2); int64_t batch_size = input_ids_dims[0]; int64_t cur_len = input_ids_dims[1]; ORT_ENFORCE(scores_dims[0] == batch_size); int64_t vocab_size = scores_dims[1]; if (cur_len + 1 < ngram_size_) { return Status::OK(); } const auto* input_ids_data = static_cast<const int64_t*>(input_ids->DataRaw(input_ids->DataType())); NGramRepeatBlockImpl( Stream(context), input_ids_data, scores_target, gsl::narrow_cast<int>(batch_size), gsl::narrow_cast<int>(cur_len - 1), gsl::narrow_cast<int>(cur_len), gsl::narrow_cast<int>(vocab_size), gsl::narrow_cast<int>(1), gsl::narrow_cast<int>(ngram_size_)); return Status::OK(); } } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "ngram_repeat_block.h" #include "ngram_repeat_block_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { ONNX_OPERATOR_KERNEL_EX( NGramRepeatBlock, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("Tid", DataTypeImpl::GetTensorType<int64_t>()) .TypeConstraint("T", DataTypeImpl::GetTensorType<float>()), NGramRepeatBlock); using namespace ONNX_NAMESPACE; NGramRepeatBlock::NGramRepeatBlock(const OpKernelInfo& info) : CudaKernel(info) { ORT_ENFORCE(info.GetAttr<int64_t>("ngram_size", &ngram_size_).IsOK()); ORT_ENFORCE(ngram_size_ > 0); } Status NGramRepeatBlock::ComputeInternal(OpKernelContext* context) const { const Tensor* input_ids = context->Input<Tensor>(0); const Tensor* scores = context->Input<Tensor>(1); Tensor* output = context->Output(0, scores->Shape()); const auto* scores_source = static_cast<const float*>(scores->DataRaw()); auto* scores_target = static_cast<float*>(output->MutableDataRaw()); if (scores_source != scores_target) { CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(scores_target, scores_source, scores->Shape().Size() * sizeof(float), cudaMemcpyDeviceToDevice, Stream(context))); } const auto& input_ids_dims = input_ids->Shape().GetDims(); const auto& scores_dims = scores->Shape().GetDims(); ORT_ENFORCE(input_ids_dims.size() == 2); ORT_ENFORCE(scores_dims.size() == 2); int64_t batch_size = input_ids_dims[0]; int64_t cur_len = input_ids_dims[1]; ORT_ENFORCE(scores_dims[0] == batch_size); int64_t vocab_size = scores_dims[1]; if (cur_len + 1 < ngram_size_) { return Status::OK(); } const auto* input_ids_data = static_cast<const int64_t*>(input_ids->DataRaw(input_ids->DataType())); NGramRepeatBlockImpl( Stream(context), input_ids_data, scores_target, gsl::narrow_cast<int>(batch_size), gsl::narrow_cast<int>(cur_len - 1), gsl::narrow_cast<int>(cur_len), gsl::narrow_cast<int>(vocab_size), gsl::narrow_cast<int>(1), gsl::narrow_cast<int>(ngram_size_)); return Status::OK(); } } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; class NGramRepeatBlock final : public RocmKernel { public: NGramRepeatBlock(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* ctx) const override; private: int64_t ngram_size_; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; class NGramRepeatBlock final : public CudaKernel { public: NGramRepeatBlock(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* ctx) const override; private: int64_t ngram_size_; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
#include "hip/hip_runtime.h" /* Copyright (c) Microsoft Corporation. Licensed under the MIT License. */ /* Kernel implementation for blocking repeated n-grams. */ #include "core/providers/rocm/cu_inc/common.cuh" #include "contrib_ops/rocm/bert/ngram_repeat_block_impl.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; // Ban repeated ngrams of length = 'no_repeat_ngram_size' __global__ void banRepeatedTokens(const int64_t* __restrict__ tokens, float* __restrict__ lprobs, int max_predict_len, int vocab_size, int no_repeat_ngram_size) { auto row = blockIdx.x; auto col = threadIdx.x; auto start = row * (max_predict_len) + col; // Each thread compares ngram starting from // thread index with final ngram starting from // step - no_repeat_ngram_size +2 auto check_start_pos = blockDim.x; auto lprob_start = row * vocab_size; bool is_banned = true; extern __shared__ int64_t tokens_shm[]; tokens_shm[col] = tokens[start]; if (col == blockDim.x - 1) { for (int i = 1; i < no_repeat_ngram_size; i++) { if (col + i < max_predict_len) { tokens_shm[col + i] = tokens[start + i]; } } } __syncthreads(); for (int k = 0; k < no_repeat_ngram_size - 1; k++) { if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) { is_banned = false; } } if (is_banned == true) { auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1]; lprobs[lprob_start + token_to_be_banned] = -INFINITY; } } // Allocate blocks and threads based on // batch size and sequence length and launch // kernel void NGramRepeatBlockImpl( hipStream_t stream, const int64_t* tokens_ptr, float* scores_ptr, int bsz, int step, int max_predict_len, int vocab_size, int beam_size, int no_repeat_ngram_size) { int threads = step - no_repeat_ngram_size + 2; if (threads <= 0) return; int blocks = bsz * beam_size; int shared_mem_size = (step + 1) * sizeof(int64_t); // Launching N blocks where N is number of samples in a batch (beams*bsz) // Launching T threads where T is number of previous ngrams in a sample // Allocating shared mem per block for fastser access of input tokens since // each token will be accessed N times to compare with current Ngram where // N is Ngram size. banRepeatedTokens<<<blocks, threads, shared_mem_size, stream>>>( tokens_ptr, scores_ptr, max_predict_len, vocab_size, no_repeat_ngram_size); } } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
/* Copyright (c) Microsoft Corporation. Licensed under the MIT License. */ /* Kernel implementation for blocking repeated n-grams. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "contrib_ops/cuda/bert/ngram_repeat_block_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; // Ban repeated ngrams of length = 'no_repeat_ngram_size' __global__ void banRepeatedTokens(const int64_t* __restrict__ tokens, float* __restrict__ lprobs, int max_predict_len, int vocab_size, int no_repeat_ngram_size) { auto row = blockIdx.x; auto col = threadIdx.x; auto start = row * (max_predict_len) + col; // Each thread compares ngram starting from // thread index with final ngram starting from // step - no_repeat_ngram_size +2 auto check_start_pos = blockDim.x; auto lprob_start = row * vocab_size; bool is_banned = true; extern __shared__ int64_t tokens_shm[]; tokens_shm[col] = tokens[start]; if (col == blockDim.x - 1) { for (int i = 1; i < no_repeat_ngram_size; i++) { if (col + i < max_predict_len) { tokens_shm[col + i] = tokens[start + i]; } } } __syncthreads(); for (int k = 0; k < no_repeat_ngram_size - 1; k++) { if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) { is_banned = false; } } if (is_banned == true) { auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1]; lprobs[lprob_start + token_to_be_banned] = -INFINITY; } } // Allocate blocks and threads based on // batch size and sequence length and launch // kernel void NGramRepeatBlockImpl( cudaStream_t stream, const int64_t* tokens_ptr, float* scores_ptr, int bsz, int step, int max_predict_len, int vocab_size, int beam_size, int no_repeat_ngram_size) { int threads = step - no_repeat_ngram_size + 2; if (threads <= 0) return; int blocks = bsz * beam_size; int shared_mem_size = (step + 1) * sizeof(int64_t); // Launching N blocks where N is number of samples in a batch (beams*bsz) // Launching T threads where T is number of previous ngrams in a sample // Allocating shared mem per block for faster access of input tokens since // each token will be accessed N times to compare with current Ngram where // N is Ngram size. banRepeatedTokens<<<blocks, threads, shared_mem_size, stream>>>( tokens_ptr, scores_ptr, max_predict_len, vocab_size, no_repeat_ngram_size); } } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
#include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <hipcub/hipcub.hpp> #include <rocblas/rocblas.h> #include <hip/hip_fp16.h> #include "core/providers/rocm/cu_inc/common.cuh" #include "core/providers/rocm/rocm_common.h" #include "range_impl.h" using namespace onnxruntime::rocm; namespace onnxruntime { namespace rocm { template <typename T> __global__ void RangeKernel(const T start, const T delta, const int count, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < count) { output[index] = start + delta * index; } } template <typename T> Status RangeImpl(hipStream_t stream, const T start, const T delta, const int count, T* output) { constexpr int block_size = 256; int grid_size = (count + block_size - 1) / block_size; RangeKernel<T><<<grid_size, block_size, 0, stream>>>(start, delta, count, output); return HIP_CALL(hipGetLastError()); } #define SPECIALIZED_IMPL(T) \ template Status RangeImpl<T>(hipStream_t stream, const T start, const T delta, const int count, T* output); SPECIALIZED_IMPL(int16_t) SPECIALIZED_IMPL(int32_t) SPECIALIZED_IMPL(int64_t) SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cub/cub.cuh> #include <cublas_v2.h> #include <cuda_fp16.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "range_impl.h" using namespace onnxruntime::cuda; namespace onnxruntime { namespace cuda { template <typename T> __global__ void RangeKernel(const T start, const T delta, const int count, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < count) { output[index] = start + delta * index; } } template <typename T> Status RangeImpl(cudaStream_t stream, const T start, const T delta, const int count, T* output) { constexpr int block_size = 256; int grid_size = (count + block_size - 1) / block_size; RangeKernel<T><<<grid_size, block_size, 0, stream>>>(start, delta, count, output); return CUDA_CALL(cudaGetLastError()); } #define SPECIALIZED_IMPL(T) \ template Status RangeImpl<T>(cudaStream_t stream, const T start, const T delta, const int count, T* output); SPECIALIZED_IMPL(int16_t) SPECIALIZED_IMPL(int32_t) SPECIALIZED_IMPL(int64_t) SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; void NGramRepeatBlockImpl( hipStream_t stream, const int64_t* tokens_ptr, float* scores_ptr, int bsz, int step, int max_predict_len, int vocab_size, int beam_size, int no_repeat_ngram_size); } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; void NGramRepeatBlockImpl( cudaStream_t stream, const int64_t* tokens_ptr, float* scores_ptr, int bsz, int step, int max_predict_len, int vocab_size, int beam_size, int no_repeat_ngram_size); } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; template <typename T> class RemovePadding final : public RocmKernel { public: RemovePadding(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* ctx) const override; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template <typename T> class RemovePadding final : public CudaKernel { public: RemovePadding(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* ctx) const override; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/rocm/rocm_common.h" #include "contrib_ops/rocm/bert/restore_padding.h" #include "contrib_ops/rocm/bert/bert_padding.h" namespace onnxruntime { namespace contrib { namespace rocm { #define REGISTER_KERNEL_TYPED(T) \ ONNX_OPERATOR_TYPED_KERNEL_EX( \ RestorePadding, \ kMSDomain, \ 1, \ T, \ kRocmExecutionProvider, \ (*KernelDefBuilder::Create()) \ .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \ RestorePadding<T>); REGISTER_KERNEL_TYPED(float) REGISTER_KERNEL_TYPED(MLFloat16) using namespace ONNX_NAMESPACE; template <typename T> RestorePadding<T>::RestorePadding(const OpKernelInfo& op_kernel_info) : RocmKernel(op_kernel_info) { } template <typename T> Status RestorePadding<T>::ComputeInternal(OpKernelContext* context) const { // shape of inputs: // input: (total_tokens, hidden_size) // token_offset: (batch_size, sequence_length) // shape of outputs: // output: (batch_size, sequence_length, hidden_size) const Tensor* input = context->Input<Tensor>(0); const Tensor* token_offset = context->Input<Tensor>(1); const auto& dims = input->Shape().GetDims(); if (dims.size() != 2) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'input' is expected to have 2 dimensions, got ", dims.size()); } int64_t total_tokens = dims[0]; int64_t hidden_size = dims[1]; const auto& token_offset_dims = token_offset->Shape().GetDims(); if (token_offset_dims.size() != 2) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'token_offset' is expected to have 2 dimensions, got ", token_offset_dims.size()); } int64_t batch_size = token_offset_dims[0]; int64_t sequence_length = token_offset_dims[1]; TensorShapeVector output_shape(3); output_shape[0] = batch_size; output_shape[1] = sequence_length; output_shape[2] = hidden_size; Tensor* output = context->Output(0, output_shape); typedef typename ToHipType<T>::MappedType HipT; LaunchRestorePadding<HipT>( reinterpret_cast<HipT*>(output->MutableData<T>()), reinterpret_cast<const HipT*>(input->Data<T>()), token_offset->Data<int>(), static_cast<int>(total_tokens), static_cast<int>(hidden_size), static_cast<int>(batch_size), static_cast<int>(sequence_length), Stream(context)); HIP_RETURN_IF_ERROR(hipGetLastError()); return Status::OK(); } } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "contrib_ops/cuda/bert/restore_padding.h" #include "contrib_ops/cuda/bert/bert_padding.h" namespace onnxruntime { namespace contrib { namespace cuda { #define REGISTER_KERNEL_TYPED(T) \ ONNX_OPERATOR_TYPED_KERNEL_EX( \ RestorePadding, \ kMSDomain, \ 1, \ T, \ kCudaExecutionProvider, \ (*KernelDefBuilder::Create()) \ .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \ RestorePadding<T>); REGISTER_KERNEL_TYPED(float) REGISTER_KERNEL_TYPED(MLFloat16) using namespace ONNX_NAMESPACE; template <typename T> RestorePadding<T>::RestorePadding(const OpKernelInfo& op_kernel_info) : CudaKernel(op_kernel_info) { } template <typename T> Status RestorePadding<T>::ComputeInternal(OpKernelContext* context) const { // shape of inputs: // input: (total_tokens, hidden_size) // token_offset: (batch_size, sequence_length) // shape of outputs: // output: (batch_size, sequence_length, hidden_size) const Tensor* input = context->Input<Tensor>(0); const Tensor* token_offset = context->Input<Tensor>(1); const auto& dims = input->Shape().GetDims(); if (dims.size() != 2) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'input' is expected to have 2 dimensions, got ", dims.size()); } int64_t total_tokens = dims[0]; int64_t hidden_size = dims[1]; const auto& token_offset_dims = token_offset->Shape().GetDims(); if (token_offset_dims.size() != 2) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'token_offset' is expected to have 2 dimensions, got ", token_offset_dims.size()); } int64_t batch_size = token_offset_dims[0]; int64_t sequence_length = token_offset_dims[1]; TensorShapeVector output_shape(3); output_shape[0] = batch_size; output_shape[1] = sequence_length; output_shape[2] = hidden_size; Tensor* output = context->Output(0, output_shape); typedef typename ToCudaType<T>::MappedType CudaT; LaunchRestorePadding<CudaT>( reinterpret_cast<CudaT*>(output->MutableData<T>()), reinterpret_cast<const CudaT*>(input->Data<T>()), token_offset->Data<int>(), static_cast<int>(total_tokens), static_cast<int>(hidden_size), static_cast<int>(batch_size), static_cast<int>(sequence_length), Stream(context)); CUDA_RETURN_IF_ERROR(cudaGetLastError()); return Status::OK(); } } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; template <typename T> class RestorePadding final : public RocmKernel { public: RestorePadding(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* ctx) const override; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template <typename T> class RestorePadding final : public CudaKernel { public: RestorePadding(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* ctx) const override; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/rocm_common.h" namespace onnxruntime { namespace contrib { namespace rocm { // A wrapper class of hipEvent_t that destroys the event automatically to avoid a memory leak. class AutoDestoryCudaEvent { public: AutoDestoryCudaEvent() : rocm_event_(nullptr) { } ~AutoDestoryCudaEvent() { if (rocm_event_ != nullptr) (void)hipEventDestroy(rocm_event_); } hipEvent_t& Get() { return rocm_event_; } private: hipEvent_t rocm_event_; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/cuda_common.h" namespace onnxruntime { namespace contrib { namespace cuda { // A wrapper class of cudaEvent_t that destroys the event automatically to avoid a memory leak. class AutoDestoryCudaEvent { public: AutoDestoryCudaEvent() : cuda_event_(nullptr) { } ~AutoDestoryCudaEvent() { if (cuda_event_ != nullptr) (void)cudaEventDestroy(cuda_event_); } cudaEvent_t& Get() { return cuda_event_; } private: cudaEvent_t cuda_event_; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #if defined(USE_MPI) #define OMPI_SKIP_MPICXX 1 // See https://github.com/open-mpi/ompi/issues/5157 #include <mpi.h> #undef OMPI_SKIP_MPICXX namespace onnxruntime { #if defined(USE_MPI) #define MPI_CHECK(condition) \ do { \ int error = (condition); \ ORT_ENFORCE( \ error == MPI_SUCCESS, \ "MPI Error at: ", \ __FILE__, \ ":", \ __LINE__, \ ": ", \ error); \ } while (0) #endif } // namespace onnxruntime #endif ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #if defined(USE_MPI) #define OMPI_SKIP_MPICXX 1 // See https://github.com/open-mpi/ompi/issues/5157 #include <mpi.h> #undef OMPI_SKIP_MPICXX namespace onnxruntime { #if defined(USE_MPI) #define MPI_CHECK(condition) \ do { \ int error = (condition); \ ORT_ENFORCE( \ error == MPI_SUCCESS, \ "MPI Error at: ", \ __FILE__, \ ":", \ __LINE__, \ ": ", \ error); \ } while (0) #endif } // namespace onnxruntime #endif ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/rocm_kernel.h" #if defined(ORT_USE_NCCL) #include <rccl/rccl.h> #endif namespace onnxruntime { namespace contrib { namespace rocm { // ----------------------------------------------------------------------- // Defines a new version of the nccl classes // that are independent of training::DistributedRunContext and only rely on MPI // ----------------------------------------------------------------------- class NcclContext final { public: NcclContext(); ~NcclContext(); ncclComm_t Comm() { return comm_; } int Rank() const { return rank_; } int Size() const { return world_size_; } private: ncclComm_t comm_; int rank_; int world_size_; }; class NcclKernel : public ::onnxruntime::rocm::RocmKernel { public: explicit NcclKernel(const OpKernelInfo& info); protected: NcclContext* nccl_ = nullptr; }; /* * Defines new versions of the Nccl classes that are independent of training::DistributedContext * and only rely on MPI */ class AllReduce final : public NcclKernel { public: explicit AllReduce(const OpKernelInfo& info); Status ComputeInternal(OpKernelContext* context) const override; }; class AllGather final : public NcclKernel { public: explicit AllGather(const OpKernelInfo& info); Status ComputeInternal(OpKernelContext* context) const override; private: int64_t group_size_ = -1; int64_t axis_ = -1; const ROCMExecutionProvider* rocm_ep_; }; class AllToAll final : public NcclKernel { public: explicit AllToAll(const OpKernelInfo& info); Status ComputeInternal(OpKernelContext* context) const override; private: int64_t group_size_ = -1; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/cuda_kernel.h" #if defined(ORT_USE_NCCL) #include <nccl.h> #endif namespace onnxruntime { namespace contrib { namespace cuda { // ----------------------------------------------------------------------- // Defines a new version of the nccl classes // that are independent of training::DistributedRunContext and only rely on MPI // ----------------------------------------------------------------------- class NcclContext final { public: NcclContext(); ~NcclContext(); ncclComm_t Comm() { return comm_; } int Rank() const { return rank_; } int Size() const { return world_size_; } private: ncclComm_t comm_; int rank_; int world_size_; }; class NcclKernel : public ::onnxruntime::cuda::CudaKernel { public: explicit NcclKernel(const OpKernelInfo& info); protected: NcclContext* nccl_ = nullptr; }; /* * Defines new versions of the Nccl classes that are independent of training::DistributedContext * and only rely on MPI */ class AllReduce final : public NcclKernel { public: explicit AllReduce(const OpKernelInfo& info); Status ComputeInternal(OpKernelContext* context) const override; }; class AllGather final : public NcclKernel { public: explicit AllGather(const OpKernelInfo& info); Status ComputeInternal(OpKernelContext* context) const override; private: int64_t group_size_ = -1; int64_t axis_ = -1; const CUDAExecutionProvider* cuda_ep_; }; class AllToAll final : public NcclKernel { public: explicit AllToAll(const OpKernelInfo& info); Status ComputeInternal(OpKernelContext* context) const override; private: int64_t group_size_ = -1; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
#include "core/providers/rocm/rocm_common.h" #include "contrib_ops/rocm/diffusion/bias_add.h" #include "contrib_ops/rocm/diffusion/bias_add_impl.h" namespace onnxruntime { namespace contrib { namespace rocm { #define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasAdd, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), BiasAdd<T>); REGISTER_KERNEL_TYPED(MLFloat16); REGISTER_KERNEL_TYPED(float); using namespace ONNX_NAMESPACE; template <typename T> BiasAdd<T>::BiasAdd(const OpKernelInfo& op_info) : RocmKernel(op_info) { } template <typename T> Status BiasAdd<T>::ComputeInternal(OpKernelContext* context) const { const Tensor* input = context->Input<Tensor>(0); const auto& input_dims = input->Shape().GetDims(); if (input_dims.size() != 3) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "The input is expected to have 3 dimensions, got ", input_dims.size()); } if (input_dims[2] != 320 && input_dims[2] != 640 && input_dims[2] != 1280) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Number of channels should be 320, 640 or 1280, got ", input_dims[2]); } const Tensor* bias = context->Input<Tensor>(1); const auto& bias_dims = bias->Shape().GetDims(); if (bias_dims.size() != 1) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "The bias is expected to have 1 dimensions, got ", bias_dims.size()); } if (bias_dims[0] != input_dims[2]) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Number of channels in the last dimension of input and bias are not the same"); } const Tensor* skip = context->Input<Tensor>(2); if (skip->Shape() != input->Shape()) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Shape of input and skip (residual) shall be the same"); } Tensor* output = context->Output(0, input->Shape()); typedef typename ToHipType<T>::MappedType HipT; const int32_t grid_size = static_cast<int32_t>(input_dims[0] * input_dims[1]); LaunchBiasAddKernel<HipT>(Stream(context), grid_size, static_cast<int32_t>(input_dims[2]), reinterpret_cast<const HipT*>(input->Data<T>()), reinterpret_cast<const HipT*>(bias->Data<T>()), reinterpret_cast<const HipT*>(skip->Data<T>()), reinterpret_cast<HipT*>(output->MutableData<T>())); HIP_RETURN_IF_ERROR(hipPeekAtLastError()); return Status::OK(); } } } } ###
#include "core/providers/cuda/cuda_common.h" #include "contrib_ops/cuda/diffusion/bias_add.h" #include "contrib_ops/cuda/diffusion/bias_add_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { #define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasAdd, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), BiasAdd<T>); REGISTER_KERNEL_TYPED(MLFloat16); REGISTER_KERNEL_TYPED(float); using namespace ONNX_NAMESPACE; template <typename T> BiasAdd<T>::BiasAdd(const OpKernelInfo& op_info) : CudaKernel(op_info) { } template <typename T> Status BiasAdd<T>::ComputeInternal(OpKernelContext* context) const { const Tensor* input = context->Input<Tensor>(0); const auto& input_dims = input->Shape().GetDims(); if (input_dims.size() != 3) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "The input is expected to have 3 dimensions, got ", input_dims.size()); } if (input_dims[2] != 320 && input_dims[2] != 640 && input_dims[2] != 1280) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Number of channels should be 320, 640 or 1280, got ", input_dims[2]); } const Tensor* bias = context->Input<Tensor>(1); const auto& bias_dims = bias->Shape().GetDims(); if (bias_dims.size() != 1) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "The bias is expected to have 1 dimensions, got ", bias_dims.size()); } if (bias_dims[0] != input_dims[2]) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Number of channels in the last dimension of input and bias are not the same"); } const Tensor* skip = context->Input<Tensor>(2); if (skip->Shape() != input->Shape()) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Shape of input and skip (residual) shall be the same"); } Tensor* output = context->Output(0, input->Shape()); typedef typename ToCudaType<T>::MappedType CudaT; const int32_t grid_size = static_cast<int32_t>(input_dims[0] * input_dims[1]); LaunchBiasAddKernel<CudaT>(Stream(context), grid_size, static_cast<int32_t>(input_dims[2]), reinterpret_cast<const CudaT*>(input->Data<T>()), reinterpret_cast<const CudaT*>(bias->Data<T>()), reinterpret_cast<const CudaT*>(skip->Data<T>()), reinterpret_cast<CudaT*>(output->MutableData<T>())); CUDA_RETURN_IF_ERROR(cudaPeekAtLastError()); return Status::OK(); } } } } ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; template <typename T> class BiasAdd final : public RocmKernel { public: BiasAdd(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template <typename T> class BiasAdd final : public CudaKernel { public: BiasAdd(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
#include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "core/providers/rocm/cu_inc/common.cuh" #include "contrib_ops/rocm/diffusion/bias_add_impl.h" using namespace onnxruntime::rocm; namespace onnxruntime { namespace contrib { namespace rocm { template <typename T, int32_t C, int32_t TPB> __global__ void BiasAddKernel(T const* input, T const* bias, T const* residual, T* output) { int32_t base_offset = blockIdx.x * C + threadIdx.x; int32_t bias_offset = threadIdx.x; #pragma unroll for (int32_t i = 0; i < C / TPB; ++i) { output[base_offset] = input[base_offset] + bias[bias_offset] + residual[base_offset]; base_offset += TPB; bias_offset += TPB; } } template __global__ void BiasAddKernel<float, 320, 320>(float const*, float const*, float const*, float*); template __global__ void BiasAddKernel<float, 640, 320>(float const*, float const*, float const*, float*); template __global__ void BiasAddKernel<float, 1280, 320>(float const*, float const*, float const*, float*); template __global__ void BiasAddKernel<half, 320, 320>(half const*, half const*, half const*, half*); template __global__ void BiasAddKernel<half, 640, 320>(half const*, half const*, half const*, half*); template __global__ void BiasAddKernel<half, 1280, 320>(half const*, half const*, half const*, half*); template <typename T> void LaunchBiasAddKernel(hipStream_t stream, int32_t grid_size, int32_t num_channels, T const* input, T const* bias, T const* residual, T* output) { constexpr int32_t TPB = 320; switch (num_channels) { case 320: (BiasAddKernel<T, 320, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, residual, output); break; case 640: (BiasAddKernel<T, 640, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, residual, output); break; case 1280: (BiasAddKernel<T, 1280, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, residual, output); break; default: ORT_NOT_IMPLEMENTED("Not implemented"); } } template void LaunchBiasAddKernel<float>(hipStream_t stream, int32_t grid_size, int32_t num_channels, float const* input, float const* bias, float const* residual, float* output); template void LaunchBiasAddKernel<half>(hipStream_t stream, int32_t grid_size, int32_t num_channels, half const* input, half const* bias, half const* residual, half* output); } } } ###
#include <cub/cub.cuh> #include "core/providers/cuda/cu_inc/common.cuh" #include "contrib_ops/cuda/diffusion/bias_add_impl.h" using namespace onnxruntime::cuda; namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, int32_t C, int32_t TPB> __global__ void BiasAddKernel(T const* input, T const* bias, T const* residual, T* output) { int32_t base_offset = blockIdx.x * C + threadIdx.x; int32_t bias_offset = threadIdx.x; #pragma unroll for (int32_t i = 0; i < C / TPB; ++i) { output[base_offset] = input[base_offset] + bias[bias_offset] + residual[base_offset]; base_offset += TPB; bias_offset += TPB; } } template __global__ void BiasAddKernel<float, 320, 320>(float const*, float const*, float const*, float*); template __global__ void BiasAddKernel<float, 640, 320>(float const*, float const*, float const*, float*); template __global__ void BiasAddKernel<float, 1280, 320>(float const*, float const*, float const*, float*); template __global__ void BiasAddKernel<half, 320, 320>(half const*, half const*, half const*, half*); template __global__ void BiasAddKernel<half, 640, 320>(half const*, half const*, half const*, half*); template __global__ void BiasAddKernel<half, 1280, 320>(half const*, half const*, half const*, half*); template <typename T> void LaunchBiasAddKernel(cudaStream_t stream, int32_t grid_size, int32_t num_channels, T const* input, T const* bias, T const* residual, T* output) { constexpr int32_t TPB = 320; switch (num_channels) { case 320: (BiasAddKernel<T, 320, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, residual, output); break; case 640: (BiasAddKernel<T, 640, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, residual, output); break; case 1280: (BiasAddKernel<T, 1280, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, residual, output); break; default: ORT_NOT_IMPLEMENTED("Not implemented"); } } template void LaunchBiasAddKernel<float>(cudaStream_t stream, int32_t grid_size, int32_t num_channels, float const* input, float const* bias, float const* residual, float* output); template void LaunchBiasAddKernel<half>(cudaStream_t stream, int32_t grid_size, int32_t num_channels, half const* input, half const* bias, half const* residual, half* output); } } } ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/rocm/shared_inc/rocm_utils.h" namespace onnxruntime { namespace rocm { template <typename T> Status RangeImpl(hipStream_t stream, const T start, const T delta, const int count, T* output); } // namespace rocm } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/providers/cuda/shared_inc/cuda_utils.h" namespace onnxruntime { namespace cuda { template <typename T> Status RangeImpl(cudaStream_t stream, const T start, const T delta, const int count, T* output); } // namespace cuda } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/common/status.h" #include <hip/hip_runtime.h> namespace onnxruntime { namespace contrib { namespace rocm { template <typename T> void LaunchBiasAddKernel(hipStream_t stream, int32_t grid_size, int32_t num_channels, T const* input, T const* bias, T const* residual, T* output); } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/common/status.h" #include <cuda.h> namespace onnxruntime { namespace contrib { namespace cuda { template <typename T> void LaunchBiasAddKernel(cudaStream_t stream, int32_t grid_size, int32_t num_channels, T const* input, T const* bias, T const* residual, T* output); } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
#include "core/providers/rocm/rocm_common.h" #include "contrib_ops/rocm/diffusion/bias_split_gelu.h" #include "contrib_ops/rocm/diffusion/bias_split_gelu_impl.h" namespace onnxruntime { namespace contrib { namespace rocm { #define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasSplitGelu, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), BiasSplitGelu<T>); REGISTER_KERNEL_TYPED(MLFloat16); REGISTER_KERNEL_TYPED(float); using namespace ONNX_NAMESPACE; template <typename T> BiasSplitGelu<T>::BiasSplitGelu(const OpKernelInfo& op_info) : RocmKernel(op_info) { } template <typename T> Status BiasSplitGelu<T>::ComputeInternal(OpKernelContext* context) const { const Tensor* input = context->Input<Tensor>(0); const auto& input_dims = input->Shape().GetDims(); if (input_dims.size() != 3) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "input is expected to have 3 dimensions, got ", input_dims.size()); } if (input_dims[2] != 2560 && input_dims[2] != 5120 && input_dims[2] != 10240) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "hidden size should be 2560, 5120 or 10240, got ", input_dims[2]); } const Tensor* bias = context->Input<Tensor>(1); const auto& bias_dims = bias->Shape().GetDims(); if (bias_dims.size() != 1) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "bias is expected to have 1 dimensions, got ", bias_dims.size()); } if (bias_dims[0] != input_dims[2]) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "last dimension of input and bias are not the same"); } TensorShapeVector output_shape = input->Shape().AsShapeVector(); output_shape[2] = input_dims[2] / 2; Tensor* output = context->Output(0, output_shape); typedef typename ToHipType<T>::MappedType HipT; const int32_t grid_size = static_cast<int32_t>(input_dims[0] * input_dims[1]); const int32_t half_hidden_size = static_cast<int32_t>(input_dims[2] / 2); LaunchBiasSplitGeluKernel<HipT>(Stream(context), grid_size, half_hidden_size, reinterpret_cast<const HipT*>(input->Data<T>()), reinterpret_cast<const HipT*>(bias->Data<T>()), reinterpret_cast<HipT*>(output->MutableData<T>())); HIP_RETURN_IF_ERROR(hipPeekAtLastError()); return Status::OK(); } } } } ###
#include "core/providers/cuda/cuda_common.h" #include "contrib_ops/cuda/diffusion/bias_split_gelu.h" #include "contrib_ops/cuda/diffusion/bias_split_gelu_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { #define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasSplitGelu, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), BiasSplitGelu<T>); REGISTER_KERNEL_TYPED(MLFloat16); REGISTER_KERNEL_TYPED(float); using namespace ONNX_NAMESPACE; template <typename T> BiasSplitGelu<T>::BiasSplitGelu(const OpKernelInfo& op_info) : CudaKernel(op_info) { } template <typename T> Status BiasSplitGelu<T>::ComputeInternal(OpKernelContext* context) const { const Tensor* input = context->Input<Tensor>(0); const auto& input_dims = input->Shape().GetDims(); if (input_dims.size() != 3) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "input is expected to have 3 dimensions, got ", input_dims.size()); } if (input_dims[2] != 2560 && input_dims[2] != 5120 && input_dims[2] != 10240) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "hidden size should be 2560, 5120 or 10240, got ", input_dims[2]); } const Tensor* bias = context->Input<Tensor>(1); const auto& bias_dims = bias->Shape().GetDims(); if (bias_dims.size() != 1) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "bias is expected to have 1 dimensions, got ", bias_dims.size()); } if (bias_dims[0] != input_dims[2]) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "last dimension of input and bias are not the same"); } TensorShapeVector output_shape = input->Shape().AsShapeVector(); output_shape[2] = input_dims[2] / 2; Tensor* output = context->Output(0, output_shape); typedef typename ToCudaType<T>::MappedType CudaT; const int32_t grid_size = static_cast<int32_t>(input_dims[0] * input_dims[1]); const int32_t half_hidden_size = static_cast<int32_t>(input_dims[2] / 2); LaunchBiasSplitGeluKernel<CudaT>(Stream(context), grid_size, half_hidden_size, reinterpret_cast<const CudaT*>(input->Data<T>()), reinterpret_cast<const CudaT*>(bias->Data<T>()), reinterpret_cast<CudaT*>(output->MutableData<T>())); CUDA_RETURN_IF_ERROR(cudaPeekAtLastError()); return Status::OK(); } } } } ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/rocm/rocm_kernel.h" namespace onnxruntime { namespace contrib { namespace rocm { using namespace onnxruntime::rocm; template <typename T> class BiasSplitGelu final : public RocmKernel { public: BiasSplitGelu(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/providers/cuda/cuda_kernel.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template <typename T> class BiasSplitGelu final : public CudaKernel { public: BiasSplitGelu(const OpKernelInfo& op_kernel_info); Status ComputeInternal(OpKernelContext* context) const override; }; } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
#include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "core/providers/rocm/cu_inc/common.cuh" #include "contrib_ops/rocm/diffusion/bias_split_gelu_impl.h" using namespace onnxruntime::rocm; namespace onnxruntime { namespace contrib { namespace rocm { template <typename T, int32_t HHS, int32_t TPB> __global__ void biasSplitGeluKernel(T const* input, T const* bias, T* output) { int32_t index_input = blockIdx.x * HHS * 2 + threadIdx.x; int32_t index_output = blockIdx.x * HHS + threadIdx.x; int32_t index_bias = threadIdx.x; #pragma unroll for (int32_t i = 0; i < HHS / TPB; ++i) { auto value_left = (float)(input[index_input] + bias[index_bias]); auto value_right = (float)(input[index_input + HHS] + bias[index_bias + HHS]); float gelu_right = value_right * 0.5f * (erff(value_right / 1.41421356237f) + 1.0f); float result = value_left * gelu_right; output[index_output] = static_cast<T>(result); index_input += TPB; index_output += TPB; index_bias += TPB; } return; } template <typename T> void LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, T const* input, T const* bias, T* output) { constexpr int32_t TPB = 256; switch (half_hidden_size) { case 1280: (biasSplitGeluKernel<T, 1280, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output); break; case 2560: (biasSplitGeluKernel<T, 2560, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output); break; case 5120: (biasSplitGeluKernel<T, 5120, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output); break; default: ORT_NOT_IMPLEMENTED("Not implemented"); } } template __global__ void biasSplitGeluKernel<float, 1280, 256>(float const*, float const*, float*); template __global__ void biasSplitGeluKernel<float, 2560, 256>(float const*, float const*, float*); template __global__ void biasSplitGeluKernel<float, 5120, 256>(float const*, float const*, float*); template __global__ void biasSplitGeluKernel<half, 1280, 256>(half const*, half const*, half*); template __global__ void biasSplitGeluKernel<half, 2560, 256>(half const*, half const*, half*); template __global__ void biasSplitGeluKernel<half, 5120, 256>(half const*, half const*, half*); template void LaunchBiasSplitGeluKernel<float>(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, float const* input, float const* bias, float* output); template void LaunchBiasSplitGeluKernel<half>(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, half const* input, half const* bias, half* output); } } } ###
#include <cub/cub.cuh> #include "core/providers/cuda/cu_inc/common.cuh" #include "contrib_ops/cuda/diffusion/bias_split_gelu_impl.h" using namespace onnxruntime::cuda; namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, int32_t HHS, int32_t TPB> __global__ void biasSplitGeluKernel(T const* input, T const* bias, T* output) { int32_t index_input = blockIdx.x * HHS * 2 + threadIdx.x; int32_t index_output = blockIdx.x * HHS + threadIdx.x; int32_t index_bias = threadIdx.x; #pragma unroll for (int32_t i = 0; i < HHS / TPB; ++i) { auto value_left = (float)(input[index_input] + bias[index_bias]); auto value_right = (float)(input[index_input + HHS] + bias[index_bias + HHS]); float gelu_right = value_right * 0.5f * (erff(value_right / 1.41421356237f) + 1.0f); float result = value_left * gelu_right; output[index_output] = static_cast<T>(result); index_input += TPB; index_output += TPB; index_bias += TPB; } return; } template <typename T> void LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, T const* input, T const* bias, T* output) { constexpr int32_t TPB = 256; switch (half_hidden_size) { case 1280: (biasSplitGeluKernel<T, 1280, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output); break; case 2560: (biasSplitGeluKernel<T, 2560, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output); break; case 5120: (biasSplitGeluKernel<T, 5120, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output); break; default: ORT_NOT_IMPLEMENTED("Not implemented"); } } template __global__ void biasSplitGeluKernel<float, 1280, 256>(float const*, float const*, float*); template __global__ void biasSplitGeluKernel<float, 2560, 256>(float const*, float const*, float*); template __global__ void biasSplitGeluKernel<float, 5120, 256>(float const*, float const*, float*); template __global__ void biasSplitGeluKernel<half, 1280, 256>(half const*, half const*, half*); template __global__ void biasSplitGeluKernel<half, 2560, 256>(half const*, half const*, half*); template __global__ void biasSplitGeluKernel<half, 5120, 256>(half const*, half const*, half*); template void LaunchBiasSplitGeluKernel<float>(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, float const* input, float const* bias, float* output); template void LaunchBiasSplitGeluKernel<half>(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, half const* input, half const* bias, half* output); } } } ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/common/status.h" #include <hip/hip_runtime.h> namespace onnxruntime { namespace contrib { namespace rocm { template <typename T> void LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, T const* input, T const* bias, T* output); } // namespace rocm } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/common/common.h" #include "core/common/status.h" #include <cuda.h> namespace onnxruntime { namespace contrib { namespace cuda { template <typename T> void LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, T const* input, T const* bias, T* output); } // namespace cuda } // namespace contrib } // namespace onnxruntime ###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

using namespace onnxruntime::rocm;

class GroupNorm final : public RocmKernel {
 public:
  GroupNorm(const OpKernelInfo& op_kernel_info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  bool use_swish_activation_;
  float epsilon_;
  int num_groups_;
};

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

using namespace onnxruntime::cuda;

class GroupNorm final : public CudaKernel {
 public:
  GroupNorm(const OpKernelInfo& op_kernel_info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  bool use_swish_activation_;
  float epsilon_;
  int num_groups_;
};

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

using namespace onnxruntime::rocm;

template <typename T>
class GridSample final : public RocmKernel {
 public:
  explicit GridSample(const OpKernelInfo& info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  int64_t mode_i_;          // 0: bilinear (default), 1: nearest, 2: bicubic
  int64_t padding_mode_i_;  // 0: 'zeros', 1: 'border', 2: 'reflection'
  int64_t align_corners_;
};

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

using namespace onnxruntime::cuda;

template <typename T>
class GridSample final : public CudaKernel {
 public:
  explicit GridSample(const OpKernelInfo& info);
  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  int64_t mode_i_;          // 0: bilinear (default), 1: nearest, 2: bicubic
  int64_t padding_mode_i_;  // 0: 'zeros', 1: 'border', 2: 'reflection'
  int64_t align_corners_;
};

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/shared_inc/rocm_utils.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

template <typename T>
void GridSampleImpl(
    hipStream_t stream,
    const T* input_data,
    const T* grid_data,
    const int64_t mode,
    const int64_t padding_mode,
    const int64_t align_corners,
    const int64_t dims_input[4],
    const int64_t H_out,
    const int64_t W_out,
    T* output_data);

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/shared_inc/cuda_utils.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
void GridSampleImpl(
    cudaStream_t stream,
    const T* input_data,
    const T* grid_data,
    const int64_t mode,
    const int64_t padding_mode,
    const int64_t align_corners,
    const int64_t dims_input[4],
    const int64_t H_out,
    const int64_t W_out,
    T* output_data);

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/nn/layer_norm.h"
#include "core/providers/rocm/rocm_common.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

// LayerNormalization is an official ONNX operator in opset 17.
#define REGISTER_KERNEL_TYPED(T, U, V)                                                                               \
  ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX(LayerNormalization, kOnnxDomain, 1, 16, T##_##U##_##V,                     \
                                          kRocmExecutionProvider,                                                    \
                                          (*KernelDefBuilder::Create())                                              \
                                              .TypeConstraint("T", DataTypeImpl::GetTensorType<T>())                 \
                                              .TypeConstraint("U", DataTypeImpl::GetTensorType<U>())                 \
                                              .TypeConstraint("V", DataTypeImpl::GetTensorType<V>()),                \
                                          onnxruntime::rocm::LayerNorm<T, U, V, false>);                             \
  ONNX_OPERATOR_TYPED_KERNEL_EX(SimplifiedLayerNormalization, kOnnxDomain, 1, T##_##U##_##V, kRocmExecutionProvider, \
                                (*KernelDefBuilder::Create())                                                        \
                                    .TypeConstraint("T", DataTypeImpl::GetTensorType<T>())                           \
                                    .TypeConstraint("U", DataTypeImpl::GetTensorType<U>())                           \
                                    .TypeConstraint("V", DataTypeImpl::GetTensorType<V>()),                          \
                                onnxruntime::rocm::LayerNorm<T, U, V, true>);

REGISTER_KERNEL_TYPED(float, float, float)
REGISTER_KERNEL_TYPED(double, double, double)
REGISTER_KERNEL_TYPED(MLFloat16, float, MLFloat16)
REGISTER_KERNEL_TYPED(float, float, MLFloat16)
REGISTER_KERNEL_TYPED(MLFloat16, float, float)
REGISTER_KERNEL_TYPED(BFloat16, float, BFloat16)

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/nn/layer_norm.h"
#include "core/providers/cuda/cuda_common.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

// LayerNormalization is an official ONNX operator in opset 17.
#define REGISTER_KERNEL_TYPED(T, U, V)                                                                               \
  ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX(LayerNormalization, kOnnxDomain, 1, 16, T##_##U##_##V,                     \
                                          kCudaExecutionProvider,                                                    \
                                          (*KernelDefBuilder::Create())                                              \
                                              .TypeConstraint("T", DataTypeImpl::GetTensorType<T>())                 \
                                              .TypeConstraint("U", DataTypeImpl::GetTensorType<U>())                 \
                                              .TypeConstraint("V", DataTypeImpl::GetTensorType<V>()),                \
                                          onnxruntime::cuda::LayerNorm<T, U, V, false>);                             \
  ONNX_OPERATOR_TYPED_KERNEL_EX(SimplifiedLayerNormalization, kOnnxDomain, 1, T##_##U##_##V, kCudaExecutionProvider, \
                                (*KernelDefBuilder::Create())                                                        \
                                    .TypeConstraint("T", DataTypeImpl::GetTensorType<T>())                           \
                                    .TypeConstraint("U", DataTypeImpl::GetTensorType<U>())                           \
                                    .TypeConstraint("V", DataTypeImpl::GetTensorType<V>()),                          \
                                onnxruntime::cuda::LayerNorm<T, U, V, true>);

REGISTER_KERNEL_TYPED(float, float, float)
REGISTER_KERNEL_TYPED(double, double, double)
REGISTER_KERNEL_TYPED(MLFloat16, float, MLFloat16)
REGISTER_KERNEL_TYPED(float, float, MLFloat16)
REGISTER_KERNEL_TYPED(MLFloat16, float, float)
REGISTER_KERNEL_TYPED(BFloat16, float, BFloat16)

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/framework/random_generator.h"

using namespace onnxruntime::rocm;

namespace onnxruntime {
namespace contrib {
namespace rocm {

template <typename T>
void BiasDropoutKernelImpl(const hipDeviceProp_t& prop, hipStream_t stream, const int64_t N,
                           const int64_t mask_element_count, const fast_divmod fdm_dim, const float ratio,
                           PhiloxGenerator& generator, const T* X_data, const T* bias_data,
                           const T* residual_data, T* Y_data, void* mask_data, bool has_same_shape_bias,
                           bool use_bitmask);

template <bool UseBitmask>
class BiasDropout final : public RocmKernel {
 public:
  BiasDropout(const OpKernelInfo& info) : RocmKernel(info) {
    int64_t seed = 0;
    if (info.GetAttr<int64_t>("seed", &seed).IsOK()) {
      generator_ = std::make_unique<PhiloxGenerator>(static_cast<uint64_t>(seed));
    }
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  mutable std::unique_ptr<PhiloxGenerator> generator_;
  static constexpr float default_ratio_ = 0.5f;
};

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/framework/random_generator.h"

using namespace onnxruntime::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
void BiasDropoutKernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N,
                           const int64_t mask_element_count, const fast_divmod fdm_dim, const float ratio,
                           PhiloxGenerator& generator, const T* X_data, const T* bias_data,
                           const T* residual_data, T* Y_data, void* mask_data, bool has_same_shape_bias,
                           bool use_bitmask);

template <bool UseBitmask>
class BiasDropout final : public CudaKernel {
 public:
  BiasDropout(const OpKernelInfo& info) : CudaKernel(info) {
    int64_t seed = 0;
    if (info.GetAttr<int64_t>("seed", &seed).IsOK()) {
      generator_ = std::make_unique<PhiloxGenerator>(static_cast<uint64_t>(seed));
    }
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  mutable std::unique_ptr<PhiloxGenerator> generator_;
  static constexpr float default_ratio_ = 0.5f;
};

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/math/binary_elementwise_ops_impl.h"

namespace onnxruntime {
namespace rocm {

// define the device functors that perform the computation on scalars
#define OP_FUNCTOR_DEFINITION(name, expr)                  \
  template <class T, class T1, class T2>                   \
  struct OP_##name {                                       \
    __device__ __inline__ T operator()(T1 a, T2 b) const { \
      return (expr);                                       \
    }                                                      \
  };

#define BINARY_OP_NAME_EXPR(name, expr) \
  OP_FUNCTOR_DEFINITION(name, expr)

BINARY_OPS()

OP_FUNCTOR_DEFINITION(Pow, _Pow(a, b))

#undef BINARY_OP_NAME_EXPR

#define BINARY_OP_NAME_EXPR2(name, expr) \
  OP_FUNCTOR_DEFINITION(name, expr)

BINARY_OPS2()

#undef BINARY_OP_NAME_EXPR2
#undef OP_FUNCTOR_DEFINITION

}  // namespace rocm
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/math/binary_elementwise_ops_impl.h"

namespace onnxruntime {
namespace cuda {

// define the device functors that perform the computation on scalars
#define OP_FUNCTOR_DEFINITION(name, expr)                  \
  template <class T, class T1, class T2>                   \
  struct OP_##name {                                       \
    __device__ __inline__ T operator()(T1 a, T2 b) const { \
      return (expr);                                       \
    }                                                      \
  };

#define BINARY_OP_NAME_EXPR(name, expr) \
  OP_FUNCTOR_DEFINITION(name, expr)

BINARY_OPS()

OP_FUNCTOR_DEFINITION(Pow, _Pow(a, b))

#undef BINARY_OP_NAME_EXPR

#define BINARY_OP_NAME_EXPR2(name, expr) \
  OP_FUNCTOR_DEFINITION(name, expr)

BINARY_OPS2()

#undef BINARY_OP_NAME_EXPR2
#undef OP_FUNCTOR_DEFINITION

}  // namespace cuda
}  // namespace onnxruntime
###
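To make the functor-generation pattern in the two headers above easier to follow outside of device code, here is a host-only sketch of the same idea: the OP_ functor macro with the CUDA qualifiers removed, expanded for two hypothetical operators. The expansions of BINARY_OPS()/BINARY_OPS2() are not shown in these headers, so the example supplies its own tiny operator list; none of this is part of the ONNX Runtime sources.

// Host-only sketch of the OP_<name> functor pattern (illustrative; device qualifiers removed).
#include <cstdio>

#define OP_FUNCTOR_DEFINITION(name, expr) \
  template <class T, class T1, class T2>  \
  struct OP_##name {                      \
    T operator()(T1 a, T2 b) const {      \
      return (expr);                      \
    }                                     \
  };

// Stand-in for what a BINARY_OPS()-style list would expand to.
OP_FUNCTOR_DEFINITION(Add, a + b)
OP_FUNCTOR_DEFINITION(Mul, a * b)

#undef OP_FUNCTOR_DEFINITION

int main() {
  OP_Add<float, float, float> add;
  OP_Mul<float, float, float> mul;
  std::printf("add = %f, mul = %f\n", add(2.0f, 3.0f), mul(2.0f, 3.0f));
  return 0;
}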
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "contrib_ops/rocm/math/bias_softmax.h"

#include "core/providers/rocm/rocm_common.h"
#include "contrib_ops/rocm/math/bias_softmax_impl.h"

using namespace onnxruntime;
using namespace onnxruntime::rocm;
using namespace onnxruntime::contrib::rocm;

namespace onnxruntime {
namespace contrib {
namespace rocm {

namespace {

template <typename T>
struct DispatchBiasSoftmaxImpl {
  Status operator()(hipStream_t stream, miopenHandle_t miopen_handle, Tensor* Y, const Tensor* X, const Tensor* B,
                    int element_count, int batch_count, bool is_inner_broadcast, int bias_broadcast_size) {
    typedef typename ToHipType<T>::MappedType HipT;
    HipT* output_data = reinterpret_cast<HipT*>(Y->template MutableData<T>());
    const HipT* input_data = reinterpret_cast<const HipT*>(X->template Data<T>());
    const HipT* bias_data = reinterpret_cast<const HipT*>(B->template Data<T>());
    return BiasSoftmaxImpl<HipT>(stream, miopen_handle, output_data, input_data, bias_data, element_count,
                                 batch_count, is_inner_broadcast, bias_broadcast_size);
  }
};

}  // namespace

// MIOpen doesn't support double so ROCm kernel doesn't have double support for now.
#ifdef USE_ROCM
#define BIAS_SOFTMAX_TYPES float, MLFloat16
#else
#define BIAS_SOFTMAX_TYPES float, MLFloat16, double
#endif

ONNX_OPERATOR_KERNEL_EX(
    BiasSoftmax, kMSDomain, 1, kRocmExecutionProvider,
    (*KernelDefBuilder::Create()).TypeConstraint("T", BuildKernelDefConstraints<BIAS_SOFTMAX_TYPES>()), BiasSoftmax);

Status BiasSoftmax::ComputeInternal(OpKernelContext* ctx) const {
  const Tensor* X = ctx->Input<Tensor>(0);
  const Tensor* B = ctx->Input<Tensor>(1);
  const TensorShape& X_shape = X->Shape();
  const TensorShape& B_shape = B->Shape();
  Tensor* Y = ctx->Output(0, X_shape);

  const int axis = static_cast<int>(HandleNegativeAxis(axis_, X_shape.NumDimensions()));
  const int batch_count = static_cast<int>(X_shape.SizeToDimension(axis));
  const int element_count = static_cast<int>(X_shape.SizeFromDimension(axis));
  int bias_broadcast_size = static_cast<int>(B_shape.Size() / element_count);
  if (is_inner_broadcast_) bias_broadcast_size = batch_count / bias_broadcast_size;
  utils::MLTypeCallDispatcher<BIAS_SOFTMAX_TYPES> t_disp(X->GetElementType());
  return t_disp.InvokeRet<Status, DispatchBiasSoftmaxImpl>(Stream(ctx), GetMiopenHandle(ctx), Y, X, B, element_count,
                                                           batch_count, is_inner_broadcast_, bias_broadcast_size);
}

#undef BIAS_SOFTMAX_TYPES

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "contrib_ops/cuda/math/bias_softmax.h"

#include "core/providers/cuda/cuda_common.h"
#include "contrib_ops/cuda/math/bias_softmax_impl.h"

using namespace onnxruntime;
using namespace onnxruntime::cuda;
using namespace onnxruntime::contrib::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

namespace {

template <typename T>
struct DispatchBiasSoftmaxImpl {
  Status operator()(cudaStream_t stream, cudnnHandle_t cudnn_handle, Tensor* Y, const Tensor* X, const Tensor* B,
                    int element_count, int batch_count, bool is_inner_broadcast, int bias_broadcast_size) {
    typedef typename ToCudaType<T>::MappedType CudaT;
    CudaT* output_data = reinterpret_cast<CudaT*>(Y->template MutableData<T>());
    const CudaT* input_data = reinterpret_cast<const CudaT*>(X->template Data<T>());
    const CudaT* bias_data = reinterpret_cast<const CudaT*>(B->template Data<T>());
    return BiasSoftmaxImpl<CudaT>(stream, cudnn_handle, output_data, input_data, bias_data, element_count,
                                  batch_count, is_inner_broadcast, bias_broadcast_size);
  }
};

}  // namespace

// MIOpen doesn't support double so ROCm kernel doesn't have double support for now.
#ifdef USE_ROCM
#define BIAS_SOFTMAX_TYPES float, MLFloat16
#else
#define BIAS_SOFTMAX_TYPES float, MLFloat16, double
#endif

ONNX_OPERATOR_KERNEL_EX(
    BiasSoftmax, kMSDomain, 1, kCudaExecutionProvider,
    (*KernelDefBuilder::Create()).TypeConstraint("T", BuildKernelDefConstraints<BIAS_SOFTMAX_TYPES>()), BiasSoftmax);

Status BiasSoftmax::ComputeInternal(OpKernelContext* ctx) const {
  const Tensor* X = ctx->Input<Tensor>(0);
  const Tensor* B = ctx->Input<Tensor>(1);
  const TensorShape& X_shape = X->Shape();
  const TensorShape& B_shape = B->Shape();
  Tensor* Y = ctx->Output(0, X_shape);

  const int axis = static_cast<int>(HandleNegativeAxis(axis_, X_shape.NumDimensions()));
  const int batch_count = static_cast<int>(X_shape.SizeToDimension(axis));
  const int element_count = static_cast<int>(X_shape.SizeFromDimension(axis));
  int bias_broadcast_size = static_cast<int>(B_shape.Size() / element_count);
  if (is_inner_broadcast_) bias_broadcast_size = batch_count / bias_broadcast_size;
  utils::MLTypeCallDispatcher<BIAS_SOFTMAX_TYPES> t_disp(X->GetElementType());
  return t_disp.InvokeRet<Status, DispatchBiasSoftmaxImpl>(Stream(ctx), GetCudnnHandle(ctx), Y, X, B, element_count,
                                                           batch_count, is_inner_broadcast_, bias_broadcast_size);
}

#undef BIAS_SOFTMAX_TYPES

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
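As a concrete check of the shape arithmetic in ComputeInternal above, the sketch below reproduces the batch_count / element_count / bias_broadcast_size computation on the host for one assumed set of shapes. The example shapes, the helper names, and the main() driver are illustrative assumptions only; the logic mirrors the operator code.

// Host-side walk-through of the BiasSoftmax shape arithmetic (example shapes are assumptions).
#include <cstdint>
#include <cstdio>
#include <vector>

int64_t SizeToDimension(const std::vector<int64_t>& dims, size_t axis) {
  int64_t size = 1;
  for (size_t i = 0; i < axis; ++i) size *= dims[i];
  return size;
}

int64_t SizeFromDimension(const std::vector<int64_t>& dims, size_t axis) {
  int64_t size = 1;
  for (size_t i = axis; i < dims.size(); ++i) size *= dims[i];
  return size;
}

int main() {
  // Example: X of shape [8, 16, 512], bias of shape [1, 16, 512], axis attribute resolved to 2.
  std::vector<int64_t> x_shape = {8, 16, 512};
  std::vector<int64_t> b_shape = {1, 16, 512};
  const size_t axis = 2;
  const bool is_inner_broadcast = false;

  const int64_t batch_count = SizeToDimension(x_shape, axis);      // 8 * 16 = 128 softmax rows
  const int64_t element_count = SizeFromDimension(x_shape, axis);  // 512 elements per row
  int64_t bias_size = 1;
  for (int64_t d : b_shape) bias_size *= d;
  int64_t bias_broadcast_size = bias_size / element_count;         // 16
  if (is_inner_broadcast) bias_broadcast_size = batch_count / bias_broadcast_size;

  std::printf("batch_count=%lld element_count=%lld bias_broadcast_size=%lld\n",
              (long long)batch_count, (long long)element_count, (long long)bias_broadcast_size);
  return 0;
}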
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/rocm_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

// BiasSoftmax follows the OpSet-11 definition of the Softmax op, that is, the input will be coerced to a 2D tensor
// using the axis attribute, and all dims after axis (included) are in the same batch. This is different from the
// definition since OpSet-13. To use BiasSoftmax, during the fusion, if Softmax is OpSet-13 or newer, you can only
// fuse it when the axis attribute is the last dim, otherwise the computation result may be wrong.
class BiasSoftmax final : public onnxruntime::rocm::RocmKernel {
 public:
  BiasSoftmax(const OpKernelInfo& info) : RocmKernel{info} {
    info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(1));
    int64_t is_inner_broadcast_value;
    ORT_ENFORCE(info.GetAttr<int64_t>("is_inner_broadcast", &is_inner_broadcast_value).IsOK());
    is_inner_broadcast_ = is_inner_broadcast_value != 0;
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  int64_t axis_;
  bool is_inner_broadcast_;
};

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/cuda_kernel.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

// BiasSoftmax follows the OpSet-11 definition of the Softmax op, that is, the input will be coerced to a 2D tensor
// using the axis attribute, and all dims after axis (included) are in the same batch. This is different from the
// definition since OpSet-13. To use BiasSoftmax, during the fusion, if Softmax is OpSet-13 or newer, you can only
// fuse it when the axis attribute is the last dim, otherwise the computation result may be wrong.
class BiasSoftmax final : public onnxruntime::cuda::CudaKernel {
 public:
  BiasSoftmax(const OpKernelInfo& info) : CudaKernel{info} {
    info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(1));
    int64_t is_inner_broadcast_value;
    ORT_ENFORCE(info.GetAttr<int64_t>("is_inner_broadcast", &is_inner_broadcast_value).IsOK());
    is_inner_broadcast_ = is_inner_broadcast_value != 0;
  }

  Status ComputeInternal(OpKernelContext* context) const override;

 private:
  int64_t axis_;
  bool is_inner_broadcast_;
};

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"

namespace onnxruntime {
namespace contrib {
namespace rocm {

template <typename T>
Status BiasSoftmaxImpl(hipStream_t stream, miopenHandle_t miopen_handle, T* output_data, const T* input_data,
                       const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,
                       int bias_broadcast_size);

}  // namespace rocm
}  // namespace contrib
}  // namespace onnxruntime
###
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
Status BiasSoftmaxImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* output_data, const T* input_data,
                       const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,
                       int bias_broadcast_size);

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
###
#include "contrib_ops/rocm/math/binary_elementwise_ops.h" #include "contrib_ops/rocm/math/binary_elementwise_ops_impl.h" using namespace onnxruntime::common; namespace onnxruntime { namespace contrib { namespace rocm { #define CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(x, ver, T) ONNX_OPERATOR_TYPED_KERNEL_EX( x, kMSDomain, ver, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), x<T>); #define CONTRIB_BINARY_ELEMENTWISE_COMPUTE(x, T) template <> Status x<T>::ComputeInternal(OpKernelContext* context) const { BinaryElementwisePreparation prepare; ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); Impl_##x<typename ToHipType<T>::MappedType>( Stream(context), prepare.output_rank_or_simple_broadcast, &prepare.lhs_padded_strides, reinterpret_cast<const typename ToHipType<T>::MappedType*>(prepare.lhs_tensor->Data<T>()), &prepare.rhs_padded_strides, reinterpret_cast<const typename ToHipType<T>::MappedType*>(prepare.rhs_tensor->Data<T>()), &prepare.fdm_output_strides, prepare.fdm_H, prepare.fdm_C, reinterpret_cast<typename ToHipType<T>::MappedType*>(prepare.output_tensor->MutableData<T>()), prepare.output_tensor->Shape().Size()); return Status::OK(); } #define CONTRIB_BINARY_OP_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_COMPUTE(name, T) #define CONTRIB_BINARY_OP_HFD(name, ver) CONTRIB_BINARY_OP_TYPED(name, ver, MLFloat16) CONTRIB_BINARY_OP_TYPED(name, ver, float) CONTRIB_BINARY_OP_TYPED(name, ver, double) CONTRIB_BINARY_OP_TYPED(name, ver, BFloat16) CONTRIB_BINARY_OP_HFD(BiasGelu, 1) } } } ###
#include "contrib_ops/cuda/math/binary_elementwise_ops.h" #include "contrib_ops/cuda/math/binary_elementwise_ops_impl.h" using namespace onnxruntime::common; namespace onnxruntime { namespace contrib { namespace cuda { #define CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(x, ver, T) ONNX_OPERATOR_TYPED_KERNEL_EX( x, kMSDomain, ver, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), x<T>); #define CONTRIB_BINARY_ELEMENTWISE_COMPUTE(x, T) template <> Status x<T>::ComputeInternal(OpKernelContext* context) const { BinaryElementwisePreparation prepare; ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); Impl_##x<typename ToCudaType<T>::MappedType>( Stream(context), prepare.output_rank_or_simple_broadcast, &prepare.lhs_padded_strides, reinterpret_cast<const typename ToCudaType<T>::MappedType*>(prepare.lhs_tensor->Data<T>()), &prepare.rhs_padded_strides, reinterpret_cast<const typename ToCudaType<T>::MappedType*>(prepare.rhs_tensor->Data<T>()), &prepare.fdm_output_strides, prepare.fdm_H, prepare.fdm_C, reinterpret_cast<typename ToCudaType<T>::MappedType*>(prepare.output_tensor->MutableData<T>()), prepare.output_tensor->Shape().Size()); return Status::OK(); } #define CONTRIB_BINARY_OP_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_COMPUTE(name, T) #define CONTRIB_BINARY_OP_HFD(name, ver) CONTRIB_BINARY_OP_TYPED(name, ver, MLFloat16) CONTRIB_BINARY_OP_TYPED(name, ver, float) CONTRIB_BINARY_OP_TYPED(name, ver, double) CONTRIB_BINARY_OP_TYPED(name, ver, BFloat16) CONTRIB_BINARY_OP_HFD(BiasGelu, 1) } } } ###