#pragma once

#include <ATen/CPUGeneratorImpl.h>
#include <ATen/LinalgBackend.h>
#include <ATen/core/ATenGeneral.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/Generator.h>
#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/detail/HIPHooksInterface.h>
#include <ATen/detail/ORTHooksInterface.h>
#include <c10/core/QEngine.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/CallOnce.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>

#include <cstdint>
#include <memory>
#include <mutex>

namespace at {

class Tensor;

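// Controls how much precision float32 matrix multiplications may trade away
// for speed (e.g. by using TF32 on backends that support it): HIGHEST keeps
// full fp32 math, while HIGH and MEDIUM progressively permit faster,
// lower-precision internal computation.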
enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };

class TORCH_API Context {
 public:
  Context();

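  // Returns the default random number generator for the given device,
  // lazily initializing the CUDA/HIP backend state first if needed.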
  const Generator& defaultGenerator(Device device) {
    DeviceType device_type = device.type();
    initCUDAIfNeeded(device_type);
    initHIPIfNeeded(device_type);
    if (device_type == at::kCPU) {
      return at::detail::getDefaultCPUGenerator();
    } else if (device_type == at::kCUDA) {
      return at::detail::getCUDAHooks().getDefaultCUDAGenerator(
          device.index());
    } else {
      AT_ERROR(DeviceTypeName(device_type), " device type not enabled.");
    }
  }
  Device getDeviceFromPtr(void* data, DeviceType device_type) {
    initCUDAIfNeeded(device_type);
    initHIPIfNeeded(device_type);
    if (device_type == at::kCPU) {
      return DeviceType::CPU;
    } else if (device_type == at::kCUDA) {
      return at::detail::getCUDAHooks().getDeviceFromPtr(data);
    } else {
      AT_ERROR(DeviceTypeName(device_type), " device type not enabled.");
    }
  }
  static bool isPinnedPtr(void* data) {
    return detail::getCUDAHooks().isPinnedPtr(data);
  }
  static bool hasOpenMP();
  static bool hasMKL();
  static bool hasLAPACK();
  static bool hasMKLDNN();
  static bool hasMAGMA() {
    return detail::getCUDAHooks().hasMAGMA();
  }
  static bool hasCUDA() {
    return detail::getCUDAHooks().hasCUDA();
  }
  static bool hasCUDART() {
    return detail::getCUDAHooks().hasCUDART();
  }
  static long versionCUDART() {
    return detail::getCUDAHooks().versionCUDART();
  }
  static bool hasCuDNN() {
    return detail::getCUDAHooks().hasCuDNN();
  }
  static long versionCuDNN() {
    return detail::getCUDAHooks().versionCuDNN();
  }
  static bool hasCuSOLVER() {
    return detail::getCUDAHooks().hasCuSOLVER();
  }
  static bool hasHIP() {
    return detail::getHIPHooks().hasHIP();
  }
  static bool hasIPU() {
    return c10::impl::hasDeviceGuardImpl(at::DeviceType::IPU);
  }
  static bool hasXLA() {
    return c10::impl::hasDeviceGuardImpl(at::DeviceType::XLA);
  }
  static bool hasLazy() {
    return c10::impl::hasDeviceGuardImpl(at::DeviceType::Lazy);
  }
  static bool hasMPS();

  static bool hasORT() {
    return c10::impl::hasDeviceGuardImpl(at::DeviceType::ORT);
  }

  void lazyInitCUDA() {
    c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); });
  }
  void lazyInitHIP() {
    c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); });
  }
  static const at::cuda::NVRTC& getNVRTC() {
    return detail::getCUDAHooks().nvrtc();
  }

  static bool setFlushDenormal(bool on);

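  // NB: userEnabledCuDNN reports purely whether the user has *requested* that
  // CuDNN be enabled; it says nothing about whether CuDNN is actually
  // available or usable in this build.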
  bool userEnabledCuDNN() const;
  void setUserEnabledCuDNN(bool e);
  bool userEnabledMkldnn() const;
  void setUserEnabledMkldnn(bool e);
  bool benchmarkCuDNN() const;
  void setBenchmarkCuDNN(bool);
  int benchmarkLimitCuDNN() const;
  void setBenchmarkLimitCuDNN(int);
  bool deterministicCuDNN() const;
  void setDeterministicCuDNN(bool);

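  // Note [Disabling Fused SDP Kernels]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // The flash and math scaled-dot-product-attention kernels are enabled by
  // default. Disabling one of them (e.g. via
  // at::globalContext().setSDPUseFlash(false)) is mainly useful for debugging
  // and benchmarking: turning off the math fallback forces the fused flash
  // kernel to be used, and vice versa.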
  void setSDPUseFlash(bool);
  bool userEnabledFlashSDP() const;

  void setSDPUseMath(bool);
  bool userEnabledMathSDP() const;

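  // Selects the backend used by CUDA linear-algebra ops (e.g. cuSOLVER vs.
  // MAGMA). A minimal usage sketch (assumes a CUDA build with cuSOLVER
  // available):
  //
  //   at::globalContext().setLinalgPreferredBackend(at::LinalgBackend::Cusolver);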
  at::LinalgBackend linalgPreferredBackend() const;
  void setLinalgPreferredBackend(at::LinalgBackend);

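  // Note [Enabling Deterministic Operations]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Operations that normally act nondeterministically, but have an alternate
  // deterministic implementation, should check
  // at::globalContext().deterministicAlgorithms() and dispatch accordingly.
  // A minimal sketch of the pattern (example_func and its two callees are
  // hypothetical):
  //
  //   void example_func() {
  //     // See Note [Enabling Deterministic Operations]
  //     if (at::globalContext().deterministicAlgorithms()) {
  //       example_func_deterministic();
  //     } else {
  //       example_func_nondeterministic();
  //     }
  //   }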
  bool deterministicAlgorithms() const;
  bool deterministicAlgorithmsWarnOnly() const;
  void setDeterministicAlgorithms(bool, bool);

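  // Note [Writing Nondeterministic Operations]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Operations that act nondeterministically and have no deterministic
  // alternative should raise an error (or a warning, in warn-only mode) when
  // deterministic algorithms are requested, normally by calling
  // alertNotDeterministic() with the name of the caller. A minimal sketch
  // (the operator and the reason are illustrative):
  //
  //   void example_backward() {
  //     // See Note [Writing Nondeterministic Operations]
  //     // Nondeterministic because it uses atomicAdd.
  //     at::globalContext().alertNotDeterministic("example_backward");
  //     ...
  //   }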
  static void alertNotDeterministic(c10::string_view const& caller);

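  // Raises an error if deterministic algorithms are enabled, CUDA >= 10.2 is
  // in use, and the CUBLAS_WORKSPACE_CONFIG environment variable is not set
  // to either ":16:8" or ":4096:8". For details, see
  // https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility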
  void alertCuBLASConfigNotDeterministic() const;

  void setFloat32MatmulPrecision(const std::string& s);
  bool allowTF32CuDNN() const;
  void setAllowTF32CuDNN(bool);
  bool allowTF32CuBLAS() const;
  void setAllowTF32CuBLAS(bool);
  Float32MatmulPrecision float32MatmulPrecision() const;
  void setFloat32MatmulPrecision(Float32MatmulPrecision p);
  bool allowFP16ReductionCuBLAS() const;
  void setAllowFP16ReductionCuBLAS(bool);
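  // A minimal usage sketch for the TF32/matmul-precision knobs (assumes a
  // CUDA build; "high" permits TF32 in float32 matmuls):
  //
  //   at::globalContext().setFloat32MatmulPrecision("high");
  //   at::globalContext().setAllowTF32CuBLAS(true);
  //   at::globalContext().setAllowTF32CuDNN(true);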
  at::QEngine qEngine() const;
  void setQEngine(at::QEngine e);
  static const std::vector<at::QEngine>& supportedQEngines();
  static bool isXNNPACKAvailable();
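  // Whether original weights may be released after pre-packing for inference
  // (saves memory on mobile). Set this before loading the model; it is not
  // thread safe.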
  void setReleaseWeightsWhenPrepacking(bool e);
  bool releaseWeightsWhenPrepacking() const;

  void setDisplayVmapFallbackWarnings(bool enabled);
  bool areVmapFallbackWarningsEnabled() const;

  void setDefaultMobileCPUAllocator();
  void unsetDefaultMobileCPUAllocator();

 private:
  void initCUDAIfNeeded(DeviceType p) {
    if (p == DeviceType::CUDA) {
      lazyInitCUDA();
    }
  }
  void initHIPIfNeeded(DeviceType p) {
    if (p == DeviceType::HIP) {
      lazyInitHIP();
    }
  }
  static bool checkCuBLASConfigDeterministic();
  c10::once_flag thc_init;
  c10::once_flag thh_init;
  bool enabled_cudnn = true;
  bool deterministic_cudnn = false;
  bool _deterministic_algorithms = false;
  bool _deterministic_algorithms_warn_only = false;
  bool enabled_flashSDP = true;
  bool enabled_mathSDP = true;
#ifdef USE_ROCM
  bool benchmark_cudnn = true;
#else
  bool benchmark_cudnn = false;
#endif
  Float32MatmulPrecision float32_matmul_precision =
      at::Float32MatmulPrecision::HIGHEST;
  int benchmark_limit_cudnn = 10;
  bool allow_tf32_cudnn = true;
  bool allow_fp16_reduction_cublas = true;
  bool enabled_mkldnn = true;
  at::LinalgBackend linalg_preferred_backend = at::LinalgBackend::Default;
#ifdef C10_MOBILE
  bool release_original_weights = true;
#else
  bool release_original_weights = false;
#endif
  bool display_vmap_fallback_warnings_ = false;
  c10::optional<at::QEngine> quantized_engine = c10::nullopt;

  Allocator* prev_allocator_ptr_{nullptr};
};

TORCH_API Context& globalContext();

static inline void init() {
  globalContext();
}

TORCH_API Allocator* getCPUAllocator();

static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
    Backend p,
    ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      p, s);
}

static inline DeprecatedTypeProperties& CPU(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::CPU, s);
}

static inline DeprecatedTypeProperties& CUDA(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::CUDA, s);
}

static inline DeprecatedTypeProperties& HIP(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::HIP, s);
}

static inline DeprecatedTypeProperties& MPS(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::MPS, s);
}

static inline bool hasCUDA() {
  return globalContext().hasCUDA();
}

static inline bool hasHIP() {
  return globalContext().hasHIP();
}

static inline bool hasIPU() {
  return globalContext().hasIPU();
}

static inline bool hasXLA() {
  return globalContext().hasXLA();
}

static inline bool hasMPS() {
  return globalContext().hasMPS();
}

static inline bool hasORT() {
  return globalContext().hasORT();
}

static inline size_t getNumGPUs() {
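  // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
  // FUNCTION. If you are interested in interrogating the number of
  // devices for a specific device type, add that function to the
  // relevant library (e.g., similar to at::cuda::device_count()).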
  if (hasCUDA() && hasHIP()) {
    throw std::runtime_error(
        "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
        "as CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
        "means HIP). Rebuild PyTorch with one or the other disabled.");
  } else if (hasCUDA()) {
    return detail::getCUDAHooks().getNumGPUs();
  } else if (hasHIP()) {
    return detail::getHIPHooks().getNumGPUs();
  } else {
    return 0;
  }
}

static inline bool hasOpenMP() {
  return globalContext().hasOpenMP();
}

static inline bool hasMKL() {
  return globalContext().hasMKL();
}

static inline bool hasLAPACK() {
  return globalContext().hasLAPACK();
}

static inline bool hasMAGMA() {
  return globalContext().hasMAGMA();
}

static inline bool hasMKLDNN() {
  return globalContext().hasMKLDNN();
}

static inline void manual_seed(uint64_t seed) {
  auto gen = globalContext().defaultGenerator(DeviceType::CPU);
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen.mutex());
    gen.set_current_seed(seed);
  }
  // NB: Sometimes we build with CUDA, but we don't have any GPUs
  // available. In that case, we must not seed CUDA; it will fail!
  const auto num_gpus = detail::getCUDAHooks().getNumGPUs();
  if (hasCUDA() && num_gpus > 0) {
    for (const auto i : c10::irange(num_gpus)) {
      auto cuda_gen = globalContext().defaultGenerator(
          Device(at::kCUDA, static_cast<c10::DeviceIndex>(i)));
      {
        // See Note [Acquire lock when using random generators]
        std::lock_guard<std::mutex> lock(cuda_gen.mutex());
        cuda_gen.set_current_seed(seed);
      }
    }
  }
}

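// When the global flag allow_tf32 is set to true, cuBLAS handles are
// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH.
// For some operators, such as addmv, TF32 offers no performance improvement
// but causes precision loss, so this RAII guard disables TF32 within its
// scope. Usage:
//
//   NoTF32Guard disable_tf32;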
struct TORCH_API NoTF32Guard {
  NoTF32Guard();
  ~NoTF32Guard();
  static bool should_disable_tf32();

 private:
  bool changed = false;
};

#ifdef USE_ROCM
struct TORCH_API ROCmBackwardPassGuard {
  ROCmBackwardPassGuard();
  ~ROCmBackwardPassGuard();
  static bool is_backward_pass();

 private:
  static thread_local bool is_backward_pass_;
};
#endif

} // namespace at