# Re-export hub for test utilities: pulls the public testing helpers,
# regression fixtures, and test scripts into one importable namespace.
# NOTE(review): the original top-of-file header (presumably a license
# block) was lost in extraction — restore it from the project before
# committing.

from .testing import (
    DEFAULT_LAUNCH_COMMAND,
    are_the_same_tensors,
    assert_exception,
    capture_call_output,
    device_count,
    execute_subprocess_async,
    get_launch_command,
    get_torch_dist_unique_port,
    memory_allocated_func,
    path_in_accelerate_package,
    pytest_xdist_worker_id,
    require_bnb,
    require_cpu,
    require_cuda,
    require_cuda_or_hpu,
    require_cuda_or_xpu,
    require_fp8,
    require_fp16,
    require_huggingface_suite,
    require_mlu,
    require_mps,
    require_multi_device,
    require_multi_gpu,
    require_multi_gpu_or_xpu,
    require_multi_xpu,
    require_musa,
    require_non_cpu,
    require_non_hpu,
    require_non_torch_xla,
    require_non_xpu,
    require_npu,
    require_pippy,
    require_sdaa,
    require_single_device,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_torchao,
    require_torchvision,
    require_tpu,
    require_transformer_engine,
    require_transformer_engine_mxfp8,
    require_xpu,
    run_first,
    skip,
    slow,
    torch_device,
)
from .training import RegressionDataset, RegressionModel


# NOTE(review): this import was deliberately placed last in the original
# (presumably to avoid a circular import with the helpers above) — keep it
# here and do not let an import sorter move it.
from .scripts import test_script, test_sync, test_ops  # isort: skip