muellerzr committed
Commit ad460f5 · 1 Parent(s): 7aae401

Clean up imports and remove them

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. src/accelerator.py +0 -137
  2. src/big_modeling.py +0 -35
  3. src/checkpointing.py +0 -35
  4. src/commands/accelerate_cli.py +0 -13
  5. src/commands/config/__init__.py +0 -40
  6. src/commands/config/cluster.py +0 -34
  7. src/commands/config/config.py +0 -14
  8. src/commands/config/config_args.py +0 -15
  9. src/commands/config/config_utils.py +0 -15
  10. src/commands/config/default.py +0 -12
  11. src/commands/config/sagemaker.py +0 -24
  12. src/commands/config/update.py +0 -9
  13. src/commands/env.py +0 -17
  14. src/commands/estimate.py +0 -24
  15. src/commands/launch.py +0 -50
  16. src/commands/menu/__init__.py +0 -1
  17. src/commands/menu/cursor.py +0 -53
  18. src/commands/menu/helpers.py +0 -47
  19. src/commands/menu/input.py +0 -74
  20. src/commands/menu/keymap.py +0 -122
  21. src/commands/menu/selection_menu.py +0 -131
  22. src/commands/test.py +0 -9
  23. src/commands/tpu.py +0 -12
  24. src/data_loader.py +0 -26
  25. src/hooks.py +0 -19
  26. src/launchers.py +0 -19
  27. src/local_sgd.py +0 -6
  28. src/logging.py +0 -9
  29. src/memory_utils.py +0 -10
  30. src/optimizer.py +0 -15
  31. src/scheduler.py +0 -8
  32. src/state.py +0 -44
  33. src/test_utils/examples.py +0 -134
  34. src/test_utils/scripts/__init__.py +0 -0
  35. src/test_utils/scripts/external_deps/__init__.py +0 -0
  36. src/test_utils/scripts/external_deps/test_checkpointing.py +0 -257
  37. src/test_utils/scripts/external_deps/test_metrics.py +0 -280
  38. src/test_utils/scripts/external_deps/test_peak_memory_usage.py +0 -265
  39. src/test_utils/scripts/external_deps/test_performance.py +0 -219
  40. src/test_utils/scripts/test_cli.py +0 -13
  41. src/test_utils/scripts/test_distributed_data_loop.py +0 -226
  42. src/test_utils/scripts/test_notebook.py +0 -40
  43. src/test_utils/scripts/test_ops.py +0 -147
  44. src/test_utils/scripts/test_script.py +0 -660
  45. src/test_utils/scripts/test_sync.py +0 -355
  46. src/test_utils/testing.py +0 -501
  47. src/test_utils/training.py +0 -89
  48. src/tracking.py +0 -23
  49. src/utils/bnb.py +0 -28
  50. src/utils/constants.py +0 -5
src/accelerator.py CHANGED
@@ -1,140 +1,3 @@
-
-
-from __future__ import annotations
-
-import contextlib
-import functools
-import json
-import math
-import os
-import re
-import shutil
-import sys
-import warnings
-from collections import OrderedDict
-from contextlib import contextmanager
-from functools import partial
-from types import MethodType
-from typing import Any, Callable, Union
-
-import torch
-import torch.utils.hooks as hooks
-
-from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
-from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
-from .hooks import AlignDevicesHook
-from .logging import get_logger
-from .optimizer import AcceleratedOptimizer
-from .scheduler import AcceleratedScheduler
-from .state import AcceleratorState, GradientState, PartialState
-from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers
-from .utils import (
-    MODEL_NAME,
-    SAFE_WEIGHTS_INDEX_NAME,
-    SAFE_WEIGHTS_NAME,
-    WEIGHTS_INDEX_NAME,
-    WEIGHTS_NAME,
-    AutocastKwargs,
-    DeepSpeedPlugin,
-    DistributedDataParallelKwargs,
-    DistributedType,
-    DynamoBackend,
-    FP8RecipeKwargs,
-    FullyShardedDataParallelPlugin,
-    GradientAccumulationPlugin,
-    GradScalerKwargs,
-    InitProcessGroupKwargs,
-    KwargsHandler,
-    LoggerType,
-    MegatronLMPlugin,
-    PrecisionType,
-    ProjectConfiguration,
-    RNGType,
-    TorchDynamoPlugin,
-    check_os_kernel,
-    clean_state_dict_for_safetensors,
-    compare_versions,
-    convert_model,
-    convert_outputs_to_fp32,
-    extract_model_from_parallel,
-    gather,
-    gather_object,
-    get_mixed_precision_context_manager,
-    get_pretty_name,
-    has_transformer_engine_layers,
-    is_bf16_available,
-    is_deepspeed_available,
-    is_fp8_available,
-    is_ipex_available,
-    is_megatron_lm_available,
-    is_msamp_available,
-    is_npu_available,
-    is_torch_version,
-    is_tpu_available,
-    is_xpu_available,
-    load_fsdp_model,
-    load_fsdp_optimizer,
-    pad_across_processes,
-    parse_choice_from_env,
-    recursively_apply,
-    reduce,
-    release_memory,
-    save,
-    save_fsdp_model,
-    save_fsdp_optimizer,
-    shard_checkpoint,
-    wait_for_everyone,
-)
-from .utils.constants import FSDP_PYTORCH_VERSION
-from .utils.modeling import get_state_dict_offloaded_model
-from .utils.other import is_compiled_module
-
-
-if is_deepspeed_available():
-    from .utils import (
-        DeepSpeedEngineWrapper,
-        DeepSpeedOptimizerWrapper,
-        DeepSpeedSchedulerWrapper,
-        DummyOptim,
-        DummyScheduler,
-    )
-
-if is_fp8_available():
-    import transformer_engine.common.recipe as te_recipe
-    from transformer_engine.pytorch import fp8_autocast
-
-
-if is_megatron_lm_available():
-    from .utils import (
-        MegatronEngine,
-        MegatronLMDummyDataLoader,
-        MegatronLMDummyScheduler,
-        MegatronLMOptimizerWrapper,
-        MegatronLMSchedulerWrapper,
-        megatron_lm_initialize,
-        megatron_lm_prepare_data_loader,
-        megatron_lm_prepare_model,
-        megatron_lm_prepare_optimizer,
-        megatron_lm_prepare_scheduler,
-    )
-
-    from torch.distributed.algorithms.join import Join
-
-
-if is_tpu_available(check_device=False):
-    import torch_xla.core.xla_model as xm
-    import torch_xla.distributed.xla_multiprocessing as xmp
-
-
-if is_npu_available(check_device=False):
-    import torch_npu  # noqa: F401
-
-
-try:
-    from torch.optim.lr_scheduler import LRScheduler
-except ImportError:
-    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
-
 logger = get_logger(__name__)
 
 
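Note: the header removed above leans on availability-gated imports, so optional backends are only imported when their check passes. A minimal sketch of that pattern, using names taken from the diff above (not part of this commit's changes):

# Availability-gated import, as used by the deleted header above.
from accelerate.utils import is_deepspeed_available

if is_deepspeed_available():
    # Resolved only when the `deepspeed` package is actually importable.
    from accelerate.utils import DummyOptim, DummyScheduler
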
src/big_modeling.py CHANGED
@@ -1,38 +1,3 @@
-
-
-import logging
-import os
-from contextlib import contextmanager
-from functools import wraps
-from typing import Dict, List, Optional, Union
-
-import torch
-import torch.nn as nn
-
-from .hooks import (
-    AlignDevicesHook,
-    CpuOffload,
-    UserCpuOffloadHook,
-    add_hook_to_module,
-    attach_align_device_hook,
-    attach_align_device_hook_on_blocks,
-)
-from .utils import (
-    OffloadedWeightsLoader,
-    check_device_map,
-    extract_submodules_state_dict,
-    find_tied_parameters,
-    get_balanced_memory,
-    infer_auto_device_map,
-    is_npu_available,
-    is_torch_version,
-    load_checkpoint_in_model,
-    offload_state_dict,
-    parse_flag_from_env,
-    retie_parameters,
-)
-
-
 logger = logging.getLogger(__name__)
 
 
src/checkpointing.py CHANGED
@@ -1,38 +1,3 @@
-
-
-import random
-from pathlib import Path
-from typing import List
-
-import numpy as np
-import torch
-from safetensors.torch import load_file
-from torch.cuda.amp import GradScaler
-
-from .utils import (
-    MODEL_NAME,
-    OPTIMIZER_NAME,
-    RNG_STATE_NAME,
-    SAFE_MODEL_NAME,
-    SAFE_WEIGHTS_NAME,
-    SAMPLER_NAME,
-    SCALER_NAME,
-    SCHEDULER_NAME,
-    WEIGHTS_NAME,
-    get_pretty_name,
-    is_tpu_available,
-    is_xpu_available,
-    save,
-)
-
-
-if is_tpu_available(check_device=False):
-    import torch_xla.core.xla_model as xm
-
-from .logging import get_logger
-from .state import PartialState
-
-
 logger = get_logger(__name__)
 
 
src/commands/accelerate_cli.py CHANGED
@@ -1,17 +1,4 @@
 #!/usr/bin/env python
-
-
-
-from argparse import ArgumentParser
-
-from accelerate.commands.config import get_config_parser
-from accelerate.commands.env import env_command_parser
-from accelerate.commands.estimate import estimate_command_parser
-from accelerate.commands.launch import launch_command_parser
-from accelerate.commands.test import test_command_parser
-from accelerate.commands.tpu import tpu_command_parser
-
-
 def main():
     parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
     subparsers = parser.add_subparsers(help="accelerate command helpers")
src/commands/config/__init__.py DELETED
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-
-
-import argparse
-
-from .config import config_command_parser
-from .config_args import default_config_file, load_config_from_file  # noqa: F401
-from .default import default_command_parser
-from .update import update_command_parser
-
-
-def get_config_parser(subparsers=None):
-    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
-    # The main config parser
-    config_parser = config_command_parser(subparsers)
-    # The subparser to add commands to
-    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
-
-    # Then add other parsers with the parent parser
-    default_command_parser(subcommands, parents=[parent_parser])
-    update_command_parser(subcommands, parents=[parent_parser])
-
-    return config_parser
-
-
-def main():
-    config_parser = get_config_parser()
-    args = config_parser.parse_args()
-
-    if not hasattr(args, "func"):
-        config_parser.print_help()
-        exit(1)
-
-    # Run
-    args.func(args)
-
-
-if __name__ == "__main__":
-    main()
src/commands/config/cluster.py CHANGED
@@ -1,38 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import os
-
-from ...utils import (
-    ComputeEnvironment,
-    DistributedType,
-    is_deepspeed_available,
-    is_mps_available,
-    is_npu_available,
-    is_transformers_available,
-    is_xpu_available,
-)
-from ...utils.constants import (
-    DEEPSPEED_MULTINODE_LAUNCHERS,
-    FSDP_AUTO_WRAP_POLICY,
-    FSDP_BACKWARD_PREFETCH,
-    FSDP_SHARDING_STRATEGY,
-    FSDP_STATE_DICT_TYPE,
-    TORCH_DYNAMO_MODES,
-)
-from .config_args import ClusterConfig
-from .config_utils import (
-    DYNAMO_BACKENDS,
-    _ask_field,
-    _ask_options,
-    _convert_distributed_mode,
-    _convert_dynamo_backend,
-    _convert_mixed_precision,
-    _convert_yes_no_to_bool,
-)
-
-
 def get_cluster_input():
     distributed_type = _ask_options(
         "Which type of machine are you using?",
src/commands/config/config.py CHANGED
@@ -1,18 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import argparse
-import os
-
-from accelerate.utils import ComputeEnvironment
-
-from .cluster import get_cluster_input
-from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
-from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
-from .sagemaker import get_sagemaker_input
-
-
 description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
 
 
src/commands/config/config_args.py CHANGED
@@ -1,19 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import json
-import os
-from dataclasses import dataclass
-from enum import Enum
-from typing import List, Optional, Union
-
-import yaml
-
-from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
-from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
-
-
 hf_cache_home = os.path.expanduser(
     os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
 )
src/commands/config/config_utils.py CHANGED
@@ -1,19 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import argparse
-
-from ...utils.dataclasses import (
-    ComputeEnvironment,
-    DistributedType,
-    DynamoBackend,
-    PrecisionType,
-    SageMakerDistributedType,
-)
-from ..menu import BulletMenu
-
-
 DYNAMO_BACKENDS = [
     "EAGER",
     "AOT_EAGER",
src/commands/config/default.py CHANGED
@@ -1,16 +1,4 @@
 #!/usr/bin/env python
-
-
-
-from pathlib import Path
-
-import torch
-
-from ...utils import is_npu_available, is_xpu_available
-from .config_args import ClusterConfig, default_json_config_file
-from .config_utils import SubcommandHelpFormatter
-
-
 description = "Create a default config file for Accelerate with only a few flags set."
 
 
src/commands/config/sagemaker.py CHANGED
@@ -1,28 +1,4 @@
 #!/usr/bin/env python
-
-
-import json
-import os
-
-from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
-from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
-from ...utils.imports import is_boto3_available
-from .config_args import SageMakerConfig
-from .config_utils import (
-    DYNAMO_BACKENDS,
-    _ask_field,
-    _ask_options,
-    _convert_dynamo_backend,
-    _convert_mixed_precision,
-    _convert_sagemaker_distributed_mode,
-    _convert_yes_no_to_bool,
-)
-
-
-if is_boto3_available():
-    import boto3  # noqa: F401
-
-
 def _create_iam_role_for_sagemaker(role_name):
     iam_client = boto3.client("iam")
 
src/commands/config/update.py CHANGED
@@ -1,13 +1,4 @@
 #!/usr/bin/env python
-
-
-
-from pathlib import Path
-
-from .config_args import default_config_file, load_config_from_file
-from .config_utils import SubcommandHelpFormatter
-
-
 description = "Update an existing config file with the latest defaults while maintaining the old configuration."
 
 
src/commands/env.py CHANGED
@@ -1,21 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import argparse
-import os
-import platform
-
-import numpy as np
-import psutil
-import torch
-
-from accelerate import __version__ as version
-from accelerate.commands.config import default_config_file, load_config_from_file
-
-from ..utils import is_npu_available, is_xpu_available
-
-
 def env_command_parser(subparsers=None):
     if subparsers is not None:
         parser = subparsers.add_parser("env")
src/commands/estimate.py CHANGED
@@ -1,28 +1,4 @@
 #!/usr/bin/env python
-
-
-import argparse
-
-from huggingface_hub import model_info
-from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
-
-from accelerate import init_empty_weights
-from accelerate.utils import (
-    calculate_maximum_sizes,
-    convert_bytes,
-    is_timm_available,
-    is_transformers_available,
-)
-
-
-if is_transformers_available():
-    import transformers
-    from transformers import AutoConfig, AutoModel
-
-if is_timm_available():
-    import timm
-
-
 def verify_on_hub(repo: str, token: str = None):
     "Verifies that the model is on the hub and returns the model info."
     try:
src/commands/launch.py CHANGED
@@ -1,54 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import argparse
-import importlib
-import logging
-import os
-import subprocess
-import sys
-from pathlib import Path
-
-import psutil
-import torch
-
-from accelerate.commands.config import default_config_file, load_config_from_file
-from accelerate.commands.config.config_args import SageMakerConfig
-from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
-from accelerate.state import get_int_from_env
-from accelerate.utils import (
-    ComputeEnvironment,
-    DistributedType,
-    PrepareForLaunch,
-    _filter_args,
-    check_cuda_p2p_ib_support,
-    is_bf16_available,
-    is_deepspeed_available,
-    is_npu_available,
-    is_rich_available,
-    is_sagemaker_available,
-    is_torch_version,
-    is_tpu_available,
-    is_xpu_available,
-    patch_environment,
-    prepare_deepspeed_cmd_env,
-    prepare_multi_gpu_env,
-    prepare_sagemager_args_inputs,
-    prepare_simple_launcher_cmd_env,
-    prepare_tpu,
-)
-from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
-
-
-if is_rich_available():
-    from rich import get_console
-    from rich.logging import RichHandler
-
-    FORMAT = "%(message)s"
-    logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
-
-
 logger = logging.getLogger(__name__)
 
 options_to_group = {
src/commands/menu/__init__.py DELETED
@@ -1 +0,0 @@
-from .selection_menu import BulletMenu
src/commands/menu/cursor.py DELETED
@@ -1,53 +0,0 @@
-
-
-"""
-A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
-"""
-
-import os
-import sys
-from contextlib import contextmanager
-
-
-# Windows only
-if os.name == "nt":
-    import ctypes
-    import msvcrt  # noqa
-
-    class CursorInfo(ctypes.Structure):
-        # _fields is a specific attr expected by ctypes
-        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
-
-
-def hide_cursor():
-    if os.name == "nt":
-        ci = CursorInfo()
-        handle = ctypes.windll.kernel32.GetStdHandle(-11)
-        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
-        ci.visible = False
-        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
-    elif os.name == "posix":
-        sys.stdout.write("\033[?25l")
-        sys.stdout.flush()
-
-
-def show_cursor():
-    if os.name == "nt":
-        ci = CursorInfo()
-        handle = ctypes.windll.kernel32.GetStdHandle(-11)
-        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
-        ci.visible = True
-        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
-    elif os.name == "posix":
-        sys.stdout.write("\033[?25h")
-        sys.stdout.flush()
-
-
-@contextmanager
-def hide():
-    "Context manager to hide the terminal cursor"
-    try:
-        hide_cursor()
-        yield
-    finally:
-        show_cursor()
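For reference, the deleted `hide()` helper was consumed as a context manager around menu redraws (see `selection_menu.py` below). A minimal usage sketch, assuming the pre-deletion import path:

# Hypothetical usage of the deleted cursor helpers.
from accelerate.commands.menu import cursor

with cursor.hide():            # cursor is hidden while the body runs
    print("drawing menu ...")  # stand-in for the real menu rendering
# show_cursor() has been called here, even if the body raised
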
src/commands/menu/helpers.py DELETED
@@ -1,47 +0,0 @@
-
-
-"""
-A variety of helper functions and constants when dealing with terminal menu choices, based on
-https://github.com/bchao1/bullet
-"""
-
-import enum
-import shutil
-import sys
-
-
-TERMINAL_WIDTH, _ = shutil.get_terminal_size()
-
-CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
-
-
-class Direction(enum.Enum):
-    UP = 0
-    DOWN = 1
-
-
-def forceWrite(content, end=""):
-    sys.stdout.write(str(content) + end)
-    sys.stdout.flush()
-
-
-def writeColor(content, color, end=""):
-    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
-
-
-def reset_cursor():
-    forceWrite("\r")
-
-
-def move_cursor(num_lines: int, direction: str):
-    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
-
-
-def clear_line():
-    forceWrite(" " * TERMINAL_WIDTH)
-    reset_cursor()
-
-
-def linebreak():
-    reset_cursor()
-    forceWrite("-" * TERMINAL_WIDTH)
src/commands/menu/input.py DELETED
@@ -1,74 +0,0 @@
-
-
-"""
-This file contains utilities for handling input from the user and registering specific keys to specific functions,
-based on https://github.com/bchao1/bullet
-"""
-
-from typing import List
-
-from .keymap import KEYMAP, get_character
-
-
-def mark(key: str):
-    """
-    Mark the function with the key code so it can be handled in the register
-    """
-
-    def decorator(func):
-        handle = getattr(func, "handle_key", [])
-        handle += [key]
-        setattr(func, "handle_key", handle)
-        return func
-
-    return decorator
-
-
-def mark_multiple(*keys: List[str]):
-    """
-    Mark the function with the key codes so it can be handled in the register
-    """
-
-    def decorator(func):
-        handle = getattr(func, "handle_key", [])
-        handle += keys
-        setattr(func, "handle_key", handle)
-        return func
-
-    return decorator
-
-
-class KeyHandler(type):
-    """
-    Metaclass that adds the key handlers to the class
-    """
-
-    def __new__(cls, name, bases, attrs):
-        new_cls = super().__new__(cls, name, bases, attrs)
-        if not hasattr(new_cls, "key_handler"):
-            setattr(new_cls, "key_handler", {})
-        setattr(new_cls, "handle_input", KeyHandler.handle_input)
-
-        for value in attrs.values():
-            handled_keys = getattr(value, "handle_key", [])
-            for key in handled_keys:
-                new_cls.key_handler[key] = value
-        return new_cls
-
-    @staticmethod
-    def handle_input(cls):
-        "Finds and returns the selected character if it exists in the handler"
-        char = get_character()
-        if char != KEYMAP["undefined"]:
-            char = ord(char)
-        handler = cls.key_handler.get(char)
-        if handler:
-            cls.current_selection = char
-            return handler(cls)
-        else:
-            return None
-
-
-def register(cls):
-    """Adds KeyHandler metaclass to the class"""
-    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
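The removed `mark`/`register` pair lets a class route key presses to methods: `@mark` tags a method with a key code, and `register` rebuilds the class through the `KeyHandler` metaclass so those tags land in `key_handler`. A hypothetical sketch under the pre-deletion import paths (the `Pager` class is illustrative):

# Hypothetical key-dispatching class built with the deleted helpers.
from accelerate.commands.menu import input as menu_input
from accelerate.commands.menu.keymap import KEYMAP

@menu_input.register
class Pager:
    current_selection = None

    @menu_input.mark(KEYMAP["up"])
    def page_up(self):
        return "up"

    @menu_input.mark(KEYMAP["down"])
    def page_down(self):
        return "down"

# Pager().handle_input() blocks for one key press and dispatches to the
# method marked with that key code, or returns None for unmarked keys.
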
src/commands/menu/keymap.py DELETED
@@ -1,122 +0,0 @@
-
-
-"""
-Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
-"""
-
-
-import os
-import string
-import sys
-
-
-ARROW_KEY_FLAG = 1 << 8
-
-KEYMAP = {
-    "tab": ord("\t"),
-    "newline": ord("\r"),
-    "esc": 27,
-    "up": 65 + ARROW_KEY_FLAG,
-    "down": 66 + ARROW_KEY_FLAG,
-    "right": 67 + ARROW_KEY_FLAG,
-    "left": 68 + ARROW_KEY_FLAG,
-    "mod_int": 91,
-    "undefined": sys.maxsize,
-    "interrupt": 3,
-    "insert": 50,
-    "delete": 51,
-    "pg_up": 53,
-    "pg_down": 54,
-}
-
-KEYMAP["arrow_begin"] = KEYMAP["up"]
-KEYMAP["arrow_end"] = KEYMAP["left"]
-
-if sys.platform == "win32":
-    WIN_CH_BUFFER = []
-    WIN_KEYMAP = {
-        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
-        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
-        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
-        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
-        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
-        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
-        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
-        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
-    }
-
-for i in range(10):
-    KEYMAP[str(i)] = ord(str(i))
-
-
-def get_raw_chars():
-    "Gets raw characters from inputs"
-    if os.name == "nt":
-        import msvcrt
-
-        encoding = "mbcs"
-        # Flush the keyboard buffer
-        while msvcrt.kbhit():
-            msvcrt.getch()
-        if len(WIN_CH_BUFFER) == 0:
-            # Read the keystroke
-            ch = msvcrt.getch()
-
-            # If it is a prefix char, get second part
-            if ch in (b"\x00", b"\xe0"):
-                ch2 = ch + msvcrt.getch()
-                # Translate actual Win chars to bullet char types
-                try:
-                    chx = chr(WIN_KEYMAP[ch2])
-                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
-                    WIN_CH_BUFFER.append(chx)
-                    if ord(chx) in (
-                        KEYMAP["insert"] - 1 << 9,
-                        KEYMAP["delete"] - 1 << 9,
-                        KEYMAP["pg_up"] - 1 << 9,
-                        KEYMAP["pg_down"] - 1 << 9,
-                    ):
-                        WIN_CH_BUFFER.append(chr(126))
-                    ch = chr(KEYMAP["esc"])
-                except KeyError:
-                    ch = ch2[1]
-            else:
-                ch = ch.decode(encoding)
-        else:
-            ch = WIN_CH_BUFFER.pop(0)
-    elif os.name == "posix":
-        import termios
-        import tty
-
-        fd = sys.stdin.fileno()
-        old_settings = termios.tcgetattr(fd)
-        try:
-            tty.setraw(fd)
-            ch = sys.stdin.read(1)
-        finally:
-            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
-    return ch
-
-
-def get_character():
-    "Gets a character from the keyboard and returns the key code"
-    char = get_raw_chars()
-    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
-        return char
-
-    elif ord(char) == KEYMAP["esc"]:
-        combo = get_raw_chars()
-        if ord(combo) == KEYMAP["mod_int"]:
-            key = get_raw_chars()
-            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
-                return chr(ord(key) + ARROW_KEY_FLAG)
-            else:
-                return KEYMAP["undefined"]
-        else:
-            return get_raw_chars()
-
-    else:
-        if char in string.printable:
-            return char
-        else:
-            return KEYMAP["undefined"]
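The deleted keymap encodes arrow keys by adding `ARROW_KEY_FLAG` (1 << 8) to the final byte of the escape sequence, so `get_character()` can return them alongside ordinary printable characters. A hypothetical read loop under the pre-deletion import path:

# Hypothetical use of the deleted key-reading helpers.
from accelerate.commands.menu.keymap import KEYMAP, get_character

while True:
    key = get_character()
    if key == KEYMAP["undefined"]:     # unhandled keys come back as sys.maxsize
        continue
    if ord(key) == KEYMAP["newline"]:  # Enter ends the loop
        break
    if ord(key) == KEYMAP["up"]:       # arrows decode to codes >= 65 + (1 << 8)
        print("up arrow")
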
src/commands/menu/selection_menu.py DELETED
@@ -1,131 +0,0 @@
-
-
-"""
-Main driver for the selection menu, based on https://github.com/bchao1/bullet
-"""
-import builtins
-import sys
-
-from ...utils.imports import _is_package_available
-from . import cursor, input
-from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
-from .keymap import KEYMAP
-
-
-in_colab = False
-try:
-    in_colab = _is_package_available("google.colab")
-except ModuleNotFoundError:
-    pass
-
-
-@input.register
-class BulletMenu:
-    """
-    A CLI menu to select a choice from a list of choices using the keyboard.
-    """
-
-    def __init__(self, prompt: str = None, choices: list = []):
-        self.position = 0
-        self.choices = choices
-        self.prompt = prompt
-        if sys.platform == "win32":
-            self.arrow_char = "*"
-        else:
-            self.arrow_char = "➔ "
-
-    def write_choice(self, index, end: str = ""):
-        if sys.platform != "win32":
-            writeColor(self.choices[index], 32, end)
-        else:
-            forceWrite(self.choices[index], end)
-
-    def print_choice(self, index: int):
-        "Prints the choice at the given index"
-        if index == self.position:
-            forceWrite(f" {self.arrow_char} ")
-            self.write_choice(index)
-        else:
-            forceWrite(f" {self.choices[index]}")
-        reset_cursor()
-
-    def move_direction(self, direction: Direction, num_spaces: int = 1):
-        "Should not be directly called, used to move a direction of either up or down"
-        old_position = self.position
-        if direction == Direction.DOWN:
-            if self.position + 1 >= len(self.choices):
-                return
-            self.position += num_spaces
-        else:
-            if self.position - 1 < 0:
-                return
-            self.position -= num_spaces
-        clear_line()
-        self.print_choice(old_position)
-        move_cursor(num_spaces, direction.name)
-        self.print_choice(self.position)
-
-    @input.mark(KEYMAP["up"])
-    def move_up(self):
-        self.move_direction(Direction.UP)
-
-    @input.mark(KEYMAP["down"])
-    def move_down(self):
-        self.move_direction(Direction.DOWN)
-
-    @input.mark(KEYMAP["newline"])
-    def select(self):
-        move_cursor(len(self.choices) - self.position, "DOWN")
-        return self.position
-
-    @input.mark(KEYMAP["interrupt"])
-    def interrupt(self):
-        move_cursor(len(self.choices) - self.position, "DOWN")
-        raise KeyboardInterrupt
-
-    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
-    def select_row(self):
-        index = int(chr(self.current_selection))
-        movement = index - self.position
-        if index == self.position:
-            return
-        if index < len(self.choices):
-            if self.position > index:
-                self.move_direction(Direction.UP, -movement)
-            elif self.position < index:
-                self.move_direction(Direction.DOWN, movement)
-            else:
-                return
-        else:
-            return
-
-    def run(self, default_choice: int = 0):
-        "Start the menu and return the selected choice"
-        if self.prompt:
-            linebreak()
-            forceWrite(self.prompt, "\n")
-            if in_colab:
-                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
-            else:
-                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
-        self.position = default_choice
-        for i in range(len(self.choices)):
-            self.print_choice(i)
-            forceWrite("\n")
-        move_cursor(len(self.choices) - self.position, "UP")
-        with cursor.hide():
-            while True:
-                if in_colab:
-                    try:
-                        choice = int(builtins.input())
-                    except ValueError:
-                        choice = default_choice
-                else:
-                    choice = self.handle_input()
-                if choice is not None:
-                    reset_cursor()
-                    for _ in range(len(self.choices) + 1):
-                        move_cursor(1, "UP")
-                        clear_line()
-                    self.write_choice(choice, "\n")
-                    return choice
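Taken together with the two modules above, the deleted menu was driven roughly like this (the choices list is illustrative; the prompt string matches the one visible in `cluster.py` above):

# Hypothetical BulletMenu session (pre-deletion import path).
from accelerate.commands.menu import BulletMenu

choices = ["No distributed training", "multi-GPU", "TPU"]
index = BulletMenu("Which type of machine are you using?", choices).run(default_choice=0)
print("selected:", choices[index])  # run() returns the selected index
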
src/commands/test.py CHANGED
@@ -1,13 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import argparse
-import os
-
-from accelerate.test_utils import execute_subprocess_async
-
-
 def test_command_parser(subparsers=None):
     if subparsers is not None:
         parser = subparsers.add_parser("test")
src/commands/tpu.py CHANGED
@@ -1,16 +1,4 @@
 #!/usr/bin/env python
-
-
-
-import argparse
-import os
-import subprocess
-
-from packaging.version import Version, parse
-
-from accelerate.commands.config.config_args import default_config_file, load_config_from_file
-
-
 _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
 
 
src/data_loader.py CHANGED
@@ -1,29 +1,3 @@
-
-
-import math
-from contextlib import suppress
-from typing import Callable, List, Optional, Union
-
-import torch
-from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
-
-from .logging import get_logger
-from .state import AcceleratorState, DistributedType, GradientState, is_tpu_available
-from .utils import (
-    RNGType,
-    broadcast,
-    broadcast_object_list,
-    concatenate,
-    find_batch_size,
-    get_data_structure,
-    initialize_tensors,
-    is_torch_version,
-    send_to_device,
-    slice_tensors,
-    synchronize_rng_states,
-)
-
-
 logger = get_logger(__name__)
 
 # kwargs of the DataLoader in min version 1.4.0.
src/hooks.py CHANGED
@@ -1,22 +1,3 @@
-
-
-import functools
-from typing import Dict, List, Mapping, Optional, Union
-
-import torch
-import torch.nn as nn
-
-from .state import PartialState
-from .utils import (
-    PrefixedDataset,
-    find_device,
-    named_module_tensors,
-    send_to_device,
-    set_module_tensor_to_device,
-)
-from .utils.modeling import get_non_persistent_buffers
-
-
 class ModelHook:
     """
     A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
src/launchers.py CHANGED
@@ -1,22 +1,3 @@
-
-
-import os
-import sys
-import tempfile
-
-import torch
-
-from .state import AcceleratorState, PartialState
-from .utils import (
-    PrecisionType,
-    PrepareForLaunch,
-    are_libraries_initialized,
-    check_cuda_p2p_ib_support,
-    is_mps_available,
-    patch_environment,
-)
-
-
 def test_launch():
     "Verify a `PartialState` can be initialized."
     _ = PartialState()
src/local_sgd.py CHANGED
@@ -1,9 +1,3 @@
-
-import torch
-
-from accelerate import Accelerator, DistributedType
-
-
 class LocalSGD:
     """
     A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
src/logging.py CHANGED
@@ -1,12 +1,3 @@
-
-
-import functools
-import logging
-import os
-
-from .state import PartialState
-
-
 class MultiProcessAdapter(logging.LoggerAdapter):
     """
     An adapter to assist with logging in multiprocess.
src/memory_utils.py DELETED
@@ -1,10 +0,0 @@
-
-
-import warnings
-
-
-warnings.warn(
-    "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
-    "`from accelerate import find_executable_batch_size` to avoid this warning.",
-    FutureWarning,
-)
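The shim deleted here only emitted a `FutureWarning` pointing at the relocated helper. A sketch of the replacement import named in that warning, in its usual decorator form (the `starting_batch_size` value is illustrative):

# Replacement import advertised by the deleted shim's warning message.
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # batch_size is reduced automatically when an out-of-memory error is raised
    ...
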
src/optimizer.py CHANGED
@@ -1,18 +1,3 @@
-
-
-import inspect
-import warnings
-
-import torch
-
-from .state import AcceleratorState, GradientState
-from .utils import DistributedType, honor_type, is_tpu_available
-
-
-if is_tpu_available(check_device=False):
-    import torch_xla.core.xla_model as xm
-
-
 def move_to_device(state, device):
     if isinstance(state, (list, tuple)):
         return honor_type(state, (move_to_device(t, device) for t in state))
src/scheduler.py CHANGED
@@ -2,14 +2,6 @@
 
 # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
 
-import warnings
-
-from .state import AcceleratorState, GradientState
-
-
-warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
-
-
 class AcceleratedScheduler:
     """
     A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
src/state.py CHANGED
@@ -1,47 +1,3 @@
-
-
-from __future__ import annotations
-
-import logging
-import math
-import os
-import threading
-import warnings
-from contextlib import contextmanager
-from functools import partial
-from typing import Any, Callable, Optional
-
-import torch
-
-from .utils import (
-    DistributedType,
-    DynamoBackend,
-    GradientAccumulationPlugin,
-    check_cuda_p2p_ib_support,
-    check_fp8_capability,
-    get_ccl_version,
-    get_int_from_env,
-    is_ccl_available,
-    is_deepspeed_available,
-    is_fp8_available,
-    is_ipex_available,
-    is_mps_available,
-    is_npu_available,
-    is_tpu_available,
-    is_xpu_available,
-    parse_choice_from_env,
-    parse_flag_from_env,
-)
-from .utils.dataclasses import SageMakerDistributedType
-
-
-if is_tpu_available(check_device=False):
-    import torch_xla.core.xla_model as xm
-
-
-if is_npu_available(check_device=False):
-    import torch_npu  # noqa: F401
-
 logger = logging.getLogger(__name__)
 
 
src/test_utils/examples.py DELETED
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-
-
-"""
-A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each
-`examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the
-others are used to either get the code that matters, or to preprocess them (such as stripping comments)
-"""
-
-import os
-from typing import List
-
-
-def get_function_contents_by_name(lines: List[str], name: str):
-    """
-    Extracts a function from `lines` of segmented source code with the name `name`.
-
-    Args:
-        lines (`List[str]`):
-            Source code of a script seperated by line.
-        name (`str`):
-            The name of the function to extract. Should be either `training_function` or `main`
-    """
-    if name != "training_function" and name != "main":
-        raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
-    good_lines, found_start = [], False
-    for line in lines:
-        if not found_start and f"def {name}" in line:
-            found_start = True
-            good_lines.append(line)
-            continue
-        if found_start:
-            if name == "training_function" and "def main" in line:
-                return good_lines
-            if name == "main" and "if __name__" in line:
-                return good_lines
-            good_lines.append(line)
-
-
-def clean_lines(lines: List[str]):
-    """
-    Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\n')
-
-    Args:
-        lines (`List[str]`):
-            Source code of a script seperated by line.
-    """
-    return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"]
-
-
-def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):
-    """
-    Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be
-    used when testing to see if `complete_*_.py` examples have all of the implementations from each of the
-    `examples/by_feature/*` scripts.
-
-    It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code
-    is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the
-    `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.
-
-    Args:
-        base_filename (`str` or `os.PathLike`):
-            The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py`
-        feature_filename (`str` or `os.PathLike`):
-            The filepath of a single feature example script. The contents of this script are checked to see if they
-            exist in `base_filename`
-        parser_only (`bool`):
-            Whether to compare only the `main()` sections in both files, or to compare the contents of
-            `training_loop()`
-        secondary_filename (`str`, *optional*):
-            A potential secondary filepath that should be included in the check. This function extracts the base
-            functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than
-            `complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py`
-    """
-    with open(base_filename, "r") as f:
-        base_file_contents = f.readlines()
-    with open(os.path.abspath(os.path.join("examples", "nlp_example.py")), "r") as f:
-        full_file_contents = f.readlines()
-    with open(feature_filename, "r") as f:
-        feature_file_contents = f.readlines()
-    if secondary_filename is not None:
-        with open(secondary_filename, "r") as f:
-            secondary_file_contents = f.readlines()
-
-    # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content
-    if parser_only:
-        base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main"))
-        full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main"))
-        feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main"))
-        if secondary_filename is not None:
-            secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main"))
-    else:
-        base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function"))
-        full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function"))
-        feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function"))
-        if secondary_filename is not None:
-            secondary_file_func = clean_lines(
-                get_function_contents_by_name(secondary_file_contents, "training_function")
-            )
-
-    _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n"
-
-    # Specific code in our script that differs from the full version, aka what is new
-    new_feature_code = []
-    passed_idxs = []  # We keep track of the idxs just in case it's a repeated statement
-    it = iter(feature_file_func)
-    for i in range(len(feature_file_func) - 1):
-        if i not in passed_idxs:
-            line = next(it)
-            if (line not in full_file_func) and (line.lstrip() != _dl_line):
-                if "TESTING_MOCKED_DATALOADERS" not in line:
-                    new_feature_code.append(line)
-                passed_idxs.append(i)
-            else:
-                # Skip over the `config['num_epochs'] = 2` statement
-                _ = next(it)
-
-    # Extract out just the new parts from the full_file_training_func
-    new_full_example_parts = []
-    passed_idxs = []  # We keep track of the idxs just in case it's a repeated statement
-    for i, line in enumerate(base_file_func):
-        if i not in passed_idxs:
-            if (line not in full_file_func) and (line.lstrip() != _dl_line):
-                if "TESTING_MOCKED_DATALOADERS" not in line:
-                    new_full_example_parts.append(line)
-                passed_idxs.append(i)
-
-    # Finally, get the overall diff
-    diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]
-    if secondary_filename is not None:
-        diff_from_two = [line for line in full_file_contents if line not in secondary_file_func]
-        diff_from_example = [line for line in diff_from_example if line not in diff_from_two]
-
-    return diff_from_example
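The deleted `compare_against_test` returned the feature lines missing from a "complete" example. A hypothetical invocation following its docstring (the `by_feature` path is illustrative):

# Hypothetical check that a complete example covers a feature script.
from accelerate.test_utils.examples import compare_against_test

missing = compare_against_test(
    base_filename="examples/complete_cv_example.py",
    feature_filename="examples/by_feature/checkpointing.py",  # illustrative path
    parser_only=False,
    secondary_filename="examples/cv_example.py",
)
assert len(missing) == 0, "complete_cv_example.py lacks some feature code"
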
src/test_utils/scripts/__init__.py DELETED
File without changes
src/test_utils/scripts/external_deps/__init__.py DELETED
File without changes
src/test_utils/scripts/external_deps/test_checkpointing.py DELETED
@@ -1,257 +0,0 @@
-# coding=utf-8
-
-import argparse
-import json
-import os
-
-import evaluate
-import torch
-from datasets import load_dataset
-from torch.optim import AdamW
-from torch.utils.data import DataLoader
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
-
-from accelerate import Accelerator, DistributedType
-from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
-
-
-MAX_GPU_BATCH_SIZE = 16
-EVAL_BATCH_SIZE = 32
-
-
-def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
-    """
-    Creates a set of `DataLoader`s for the `glue` dataset.
-
-    Args:
-        accelerator (`Accelerator`):
-            An `Accelerator` object
-        batch_size (`int`, *optional*):
-            The batch size for the train and validation DataLoaders.
-        model_name (`str`, *optional*):
-    """
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    datasets = load_dataset("glue", "mrpc")
-
-    def tokenize_function(examples):
-        # max_length=None => use the model max length (it's actually the default)
-        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
-        return outputs
-
-    # Apply the method we just defined to all the examples in all the splits of the dataset
-    tokenized_datasets = datasets.map(
-        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
-    )
-
-    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
-    # transformers library
-    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
-
-    def collate_fn(examples):
-        # On TPU it's best to pad everything to the same length or training will be very slow.
-        if accelerator.distributed_type == DistributedType.TPU:
-            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
-        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
-
-    # Instantiate dataloaders.
-    train_dataloader = DataLoader(
-        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
-    )
-    eval_dataloader = DataLoader(
-        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
-    )
-
-    return train_dataloader, eval_dataloader
-
-
-def evaluation_loop(accelerator, model, eval_dataloader, metric):
-    model.eval()
-    samples_seen = 0
-    for step, batch in enumerate(eval_dataloader):
-        # We could avoid this line since we set the accelerator with `device_placement=True`.
-        batch.to(accelerator.device)
-        with torch.no_grad():
-            outputs = model(**batch)
-        predictions = outputs.logits.argmax(dim=-1)
-        # It is slightly faster to call this once, than multiple times
-        predictions, references = accelerator.gather(
-            (predictions, batch["labels"])
-        )  # If we are in a multiprocess environment, the last batch has duplicates
-        if accelerator.use_distributed:
-            if step == len(eval_dataloader) - 1:
-                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
-                references = references[: len(eval_dataloader.dataset) - samples_seen]
-            else:
-                samples_seen += references.shape[0]
-        metric.add_batch(
-            predictions=predictions,
-            references=references,
-        )
-
-    eval_metric = metric.compute()
-    return eval_metric["accuracy"]
-
-
-def training_function(config, args):
-    # Initialize accelerator
-    accelerator = Accelerator()
-
-    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
-    lr = config["lr"]
-    num_epochs = int(config["num_epochs"])
-    seed = int(config["seed"])
-    batch_size = int(config["batch_size"])
-    model_name = args.model_name_or_path
-
-    set_seed(seed)
-    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
-
-    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
-    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
-
-    # Instantiate optimizer
-    optimizer_cls = (
-        AdamW
-        if accelerator.state.deepspeed_plugin is None
-        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
-        else DummyOptim
-    )
-    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
-
-    if accelerator.state.deepspeed_plugin is not None:
-        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
-            "gradient_accumulation_steps"
-        ]
-    else:
-        gradient_accumulation_steps = 1
-    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
-
-    # Instantiate scheduler
-    if (
-        accelerator.state.deepspeed_plugin is None
-        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
-    ):
-        lr_scheduler = get_linear_schedule_with_warmup(
-            optimizer=optimizer,
-            num_warmup_steps=0,
-            num_training_steps=max_training_steps,
-        )
-    else:
-        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
-
-    # Prepare everything
-    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
-    # prepare method.
-    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
-        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
-    )
-
-    # We need to keep track of how many total steps we have iterated over
-    overall_step = 0
-    # We also need to keep track of the stating epoch so files are named properly
-    starting_epoch = 0
-    metric = evaluate.load("glue", "mrpc")
-    ending_epoch = num_epochs
-
-    if args.partial_train_epoch is not None:
-        ending_epoch = args.partial_train_epoch
-
-    if args.resume_from_checkpoint:
-        accelerator.load_state(args.resume_from_checkpoint)
-        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
-        state_epoch_num = ""
-        for char in epoch_string:
-            if char.isdigit():
-                state_epoch_num += char
-            else:
-                break
-        starting_epoch = int(state_epoch_num) + 1
-        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
-        accelerator.print("resumed checkpoint performance:", accuracy)
-        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
-        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
-        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
-            resumed_state = json.load(f)
-            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
-            assert (
-                resumed_state["lr"] == lr_scheduler.get_lr()[0]
-            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
-            assert (
-                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
-            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
-            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
-        return
-
-    # Now we train the model
-    state = {}
-    for epoch in range(starting_epoch, ending_epoch):
-        model.train()
-        for step, batch in enumerate(train_dataloader):
-            outputs = model(**batch)
-            loss = outputs.loss
-            loss = loss / gradient_accumulation_steps
-            accelerator.backward(loss)
-            if step % gradient_accumulation_steps == 0:
-                optimizer.step()
-                lr_scheduler.step()
-                optimizer.zero_grad()
-
-            overall_step += 1
-        output_dir = f"epoch_{epoch}"
-        output_dir = os.path.join(args.output_dir, output_dir)
-        accelerator.save_state(output_dir)
-        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
-        state["accuracy"] = accuracy
-        state["lr"] = lr_scheduler.get_lr()[0]
-        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
-        state["epoch"] = epoch
-        state["step"] = overall_step
-        accelerator.print(f"epoch {epoch}:", state)
-
-        accelerator.wait_for_everyone()
-        if accelerator.is_main_process:
-            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
-                json.dump(state, f)
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
-    parser.add_argument(
-        "--model_name_or_path",
-        type=str,
-        default="bert-base-cased",
-        help="Path to pretrained model or model identifier from huggingface.co/models.",
-        required=False,
-    )
-    parser.add_argument(
-        "--output_dir",
-        type=str,
-        default=".",
-        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
-    )
-    parser.add_argument(
-        "--resume_from_checkpoint",
-        type=str,
-        default=None,
-        help="If the training should continue from a checkpoint folder.",
-    )
-    parser.add_argument(
-        "--partial_train_epoch",
-        type=int,
-        default=None,
-        help="If passed, the training will stop after this number of epochs.",
-    )
-    parser.add_argument(
-        "--num_epochs",
-        type=int,
-        default=2,
-        help="Number of train epochs.",
-    )
-    args = parser.parse_args()
-    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
-
-    training_function(config, args)
-
-
-if __name__ == "__main__":
-    main()
src/test_utils/scripts/external_deps/test_metrics.py DELETED
@@ -1,280 +0,0 @@
1
-
2
-
3
- import logging
4
- import math
5
- import os
6
- from copy import deepcopy
7
-
8
- import datasets
9
- import evaluate
10
- import torch
11
- import transformers
12
- from datasets import load_dataset
13
- from torch.utils.data import DataLoader, IterableDataset
14
- from transformers import AutoModelForSequenceClassification, AutoTokenizer
15
-
16
- from accelerate import Accelerator
17
- from accelerate.data_loader import DataLoaderDispatcher
18
- from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device
19
- from accelerate.utils import set_seed
20
-
21
-
22
- os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
23
-
24
-
25
- class ListHandler(logging.Handler):
26
- def __init__(self, *args, **kwargs):
27
- super().__init__(*args, **kwargs)
28
- self.logs = []
29
-
30
- def emit(self, record):
31
- self.logs.append(record)
32
-
33
-
34
- def get_basic_setup(accelerator, num_samples=82, batch_size=16):
35
- "Returns everything needed to perform basic training"
36
- set_seed(42)
37
- model = RegressionModel()
38
- ddp_model = deepcopy(model)
39
- dset = RegressionDataset(length=num_samples)
40
- dataloader = DataLoader(dset, batch_size=batch_size)
41
- model.to(accelerator.device)
42
- ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
43
- return model, ddp_model, dataloader
44
-
45
-
46
- def get_dataloader(accelerator: Accelerator, use_longest=False):
47
- tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
48
- dataset = load_dataset("glue", "mrpc", split="validation")
49
-
50
- def tokenize_function(examples):
51
- outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
52
- return outputs
53
-
54
- with accelerator.main_process_first():
55
- tokenized_datasets = dataset.map(
56
- tokenize_function,
57
- batched=True,
58
- remove_columns=["idx", "sentence1", "sentence2"],
59
- )
60
-
61
- tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
62
-
63
- def collate_fn(examples):
64
- if use_longest:
65
- return tokenizer.pad(examples, padding="longest", return_tensors="pt")
66
- return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
67
-
68
- return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
69
-
70
-
71
- def get_mrpc_setup(dispatch_batches, split_batches):
72
- accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
73
- dataloader = get_dataloader(accelerator, not dispatch_batches)
74
- model = AutoModelForSequenceClassification.from_pretrained(
75
- "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
76
- )
77
- ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
78
- return {
79
- "ddp": [ddp_model, ddp_dataloader, torch_device],
80
- "no": [model, dataloader, accelerator.device],
81
- }, accelerator
82
-
83
-
84
- def generate_predictions(model, dataloader, accelerator):
85
- logits_and_targets = []
86
- for batch in dataloader:
87
- input, target = batch.values()
88
- with torch.no_grad():
89
- logit = model(input)
90
- logit, target = accelerator.gather_for_metrics((logit, target))
91
- logits_and_targets.append((logit, target))
92
- logits, targs = [], []
93
- for logit, targ in logits_and_targets:
94
- logits.append(logit)
95
- targs.append(targ)
96
- logits, targs = torch.cat(logits), torch.cat(targs)
97
- return logits, targs
98
-
99
-
100
- def test_torch_metrics(
101
- accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
102
- ):
103
- model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
104
- logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
105
- assert (
106
- len(logits) == num_samples
107
- ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
108
-
109
-
110
- def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
111
- metric = evaluate.load("glue", "mrpc")
112
- setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
113
- # First do baseline
114
- model, dataloader, device = setup["no"]
115
- model.to(device)
116
- model.eval()
117
- for batch in dataloader:
118
- batch.to(device)
119
- with torch.inference_mode():
120
- outputs = model(**batch)
121
- preds = outputs.logits.argmax(dim=-1)
122
- metric.add_batch(predictions=preds, references=batch["labels"])
123
- baseline = metric.compute()
124
-
125
- # Then do distributed
126
- model, dataloader, device = setup["ddp"]
127
- model.eval()
128
- for batch in dataloader:
129
- with torch.inference_mode():
130
- outputs = model(**batch)
131
- preds = outputs.logits.argmax(dim=-1)
132
- references = batch["labels"]
133
- preds, references = accelerator.gather_for_metrics((preds, references))
134
- metric.add_batch(predictions=preds, references=references)
135
- distributed = metric.compute()
136
-
137
- for key in "accuracy f1".split():
138
- assert math.isclose(
139
- baseline[key], distributed[key]
140
- ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
141
-
142
-
143
- def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
144
- class DummyIterableDataset(IterableDataset):
145
- def __init__(self, data):
146
- self.data = data
147
-
148
- def __len__(self):
149
- return len(self.data)
150
-
151
- def __iter__(self):
152
- for element in self.data:
153
- yield element
154
-
155
- iterable_dataset = DummyIterableDataset(list(range(30)))
156
- dataloader = DataLoader(iterable_dataset, batch_size=4)
157
- accelerator = Accelerator()
158
- prepared_dataloader = accelerator.prepare(dataloader)
159
-
160
- if accelerator.is_main_process:
161
- logger = logging.root.manager.loggerDict["accelerate.accelerator"]
162
- list_handler = ListHandler()
163
- logger.addHandler(list_handler)
164
-
165
- batches_for_metrics = []
166
- for batch in prepared_dataloader:
167
- batches_for_metrics.append(accelerator.gather_for_metrics(batch))
168
-
169
- assert torch.cat(batches_for_metrics).size(0) == 30
170
-
171
- if accelerator.is_main_process:
172
- assert len(list_handler.logs) == 0
173
- logger.removeHandler(list_handler)
174
-
175
-
176
- def test_gather_for_metrics_with_iterable_dataset():
177
- class DummyIterableDataset(IterableDataset):
178
- def __init__(self, data):
179
- self.data = data
180
-
181
- def __len__(self):
182
- return len(self.data)
183
-
184
- def __iter__(self):
185
- for element in self.data:
186
- yield element
187
-
188
- iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
189
- dataloader = DataLoader(iterable_dataset, batch_size=4)
190
-
191
- accelerator = Accelerator()
192
- prepared_dataloader = accelerator.prepare(dataloader)
193
-
194
- assert isinstance(prepared_dataloader, DataLoaderDispatcher)
195
-
196
- if accelerator.is_main_process:
197
- logger = logging.root.manager.loggerDict["accelerate.accelerator"]
198
- list_handler = ListHandler()
199
- logger.addHandler(list_handler)
200
-
201
- batches_for_metrics = []
202
- for batch in prepared_dataloader:
203
- batches_for_metrics.append(accelerator.gather_for_metrics(batch))
204
-
205
- assert torch.cat(batches_for_metrics).size(0) == 30
206
-
207
- if accelerator.is_main_process:
208
- assert len(list_handler.logs) == 0
209
-
210
- logger.removeHandler(list_handler)
211
-
212
-
213
- def test_gather_for_metrics_drop_last():
214
- accelerator = Accelerator()
215
- per_device_batch_size = 5
216
- num_items = (10 * accelerator.num_processes) + 1
217
- dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
218
- dataloader = accelerator.prepare(dataloader)
219
-
220
- iterator = iter(dataloader)
221
- next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
222
- batch = next(iterator)
223
- gathered_items = accelerator.gather_for_metrics(batch)
224
-
225
- # Should return a full set of complete batches from each GPU
226
- num_expected_items = per_device_batch_size * accelerator.num_processes
227
- assert gathered_items.size(0) == (
228
- num_expected_items
229
- ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
230
-
231
-
232
- def main():
233
- accelerator = Accelerator(split_batches=False, dispatch_batches=False)
234
- if accelerator.is_local_main_process:
235
- datasets.utils.logging.set_verbosity_warning()
236
- transformers.utils.logging.set_verbosity_warning()
237
- else:
238
- datasets.utils.logging.set_verbosity_error()
239
- transformers.utils.logging.set_verbosity_error()
240
- # These are a bit slower so they should only be run on the GPU or TPU
241
- if accelerator.device.type != "cpu":
242
- if accelerator.is_local_main_process:
243
- print("**Testing gather_for_metrics**")
244
- for split_batches in [True, False]:
245
- for dispatch_batches in [True, False]:
246
- if accelerator.is_local_main_process:
247
- print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
248
- test_mrpc(dispatch_batches, split_batches)
249
- accelerator.state._reset_state()
250
- print("test_gather_for_metrics_with_iterable_dataset")
251
- test_gather_for_metrics_with_iterable_dataset()
252
- print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
253
- test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
254
- if accelerator.is_local_main_process:
255
- print("**Test torch metrics**")
256
- for split_batches in [True, False]:
257
- for dispatch_batches in [True, False]:
258
- accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
259
- if accelerator.is_local_main_process:
260
- print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
261
- test_torch_metrics(accelerator, 99)
262
- accelerator.state._reset_state()
263
- if accelerator.is_local_main_process:
264
- print("**Test last batch is not dropped when perfectly divisible**")
265
- accelerator = Accelerator()
266
- test_torch_metrics(accelerator, 512)
267
- accelerator.state._reset_state()
268
- if accelerator.is_local_main_process:
269
- print("**Test that `drop_last` is taken into account**")
270
- test_gather_for_metrics_drop_last()
271
- accelerator.state._reset_state()
272
-
273
-
274
- def _mp_fn(index):
275
- # For xla_spawn (TPUs)
276
- main()
277
-
278
-
279
- if __name__ == "__main__":
280
- main()
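The core behavior tested above is that `gather_for_metrics` trims the samples a sharded dataloader duplicates to keep the last batch even across processes. A minimal sketch of that contract, assuming a multi-process run started with `accelerate launch`; the dummy model and data are illustrative:

    import torch
    from torch.utils.data import DataLoader
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(1, 1)
    # 10 samples do not split evenly across processes with batch_size=4; the
    # prepared dataloader pads the tail, and gather_for_metrics drops the padding.
    dataloader = DataLoader(torch.arange(10, dtype=torch.float32).unsqueeze(1), batch_size=4)
    model, dataloader = accelerator.prepare(model, dataloader)

    preds = []
    for batch in dataloader:
        with torch.no_grad():
            preds.append(accelerator.gather_for_metrics(model(batch)))
    assert torch.cat(preds).size(0) == 10  # no duplicated tail samples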
src/test_utils/scripts/external_deps/test_peak_memory_usage.py DELETED
@@ -1,265 +0,0 @@
1
- # coding=utf-8
2
-
3
- import argparse
4
- import gc
5
- import json
6
- import os
7
-
8
- import torch
9
- from datasets import load_dataset
10
- from torch.optim import AdamW
11
- from torch.utils.data import DataLoader
12
- from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
13
-
14
- from accelerate import Accelerator, DistributedType
15
- from accelerate.utils import is_npu_available, is_xpu_available
16
- from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
17
-
18
-
19
- MAX_GPU_BATCH_SIZE = 16
20
- EVAL_BATCH_SIZE = 32
21
-
22
-
23
- # Converting Bytes to Megabytes
24
- def b2mb(x):
25
- return int(x / 2**20)
26
-
27
-
28
- # This context manager is used to track the peak memory usage of the process
29
- class TorchTracemalloc:
30
- def __enter__(self):
31
- gc.collect()
32
- if torch.cuda.is_available():
33
- torch.cuda.empty_cache()
34
- torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
35
- self.begin = torch.cuda.memory_allocated()
36
- elif is_npu_available():
37
- torch.npu.empty_cache()
38
- torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero
39
- self.begin = torch.npu.memory_allocated()
40
- elif is_xpu_available():
41
- torch.xpu.empty_cache()
42
- torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero
43
- self.begin = torch.xpu.memory_allocated()
44
- return self
45
-
46
- def __exit__(self, *exc):
47
- gc.collect()
48
- if torch.cuda.is_available():
49
- torch.cuda.empty_cache()
50
- self.end = torch.cuda.memory_allocated()
51
- self.peak = torch.cuda.max_memory_allocated()
52
- elif is_npu_available():
53
- torch.npu.empty_cache()
54
- self.end = torch.npu.memory_allocated()
55
- self.peak = torch.npu.max_memory_allocated()
56
- elif is_xpu_available():
57
- torch.xpu.empty_cache()
58
- self.end = torch.xpu.memory_allocated()
59
- self.peak = torch.xpu.max_memory_allocated()
60
- self.used = b2mb(self.end - self.begin)
61
- self.peaked = b2mb(self.peak - self.begin)
62
- # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
63
-
64
-
65
- def get_dataloaders(
66
- accelerator: Accelerator,
67
- batch_size: int = 16,
68
- model_name: str = "bert-base-cased",
69
- n_train: int = 320,
70
- n_val: int = 160,
71
- ):
72
- """
73
- Creates a set of `DataLoader`s for the `glue` dataset.
74
-
75
- Args:
76
- accelerator (`Accelerator`):
77
- An `Accelerator` object
78
- batch_size (`int`, *optional*):
79
- The batch size for the train and validation DataLoaders.
80
- model_name (`str`, *optional*):
81
- The name of the model to use.
82
- n_train (`int`, *optional*):
83
- The number of training examples to use.
84
- n_val (`int`, *optional*):
85
- The number of validation examples to use.
86
- """
87
- tokenizer = AutoTokenizer.from_pretrained(model_name)
88
- datasets = load_dataset(
89
- "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
90
- )
91
-
92
- def tokenize_function(examples):
93
- # max_length=None => use the model max length (it's actually the default)
94
- outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
95
- return outputs
96
-
97
- # Apply the method we just defined to all the examples in all the splits of the dataset
98
- tokenized_datasets = datasets.map(
99
- tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
100
- )
101
-
102
- # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
103
- # transformers library
104
- tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
105
-
106
- def collate_fn(examples):
107
- # On TPU it's best to pad everything to the same length or training will be very slow.
108
- if accelerator.distributed_type == DistributedType.TPU:
109
- return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
110
- return tokenizer.pad(examples, padding="longest", return_tensors="pt")
111
-
112
- # Instantiate dataloaders.
113
- train_dataloader = DataLoader(
114
- tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
115
- )
116
- eval_dataloader = DataLoader(
117
- tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
118
- )
119
-
120
- return train_dataloader, eval_dataloader
121
-
122
-
123
- def training_function(config, args):
124
- # Initialize accelerator
125
- accelerator = Accelerator()
126
-
127
- # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
128
- lr = config["lr"]
129
- num_epochs = int(config["num_epochs"])
130
- seed = int(config["seed"])
131
- batch_size = int(config["batch_size"])
132
- model_name = args.model_name_or_path
133
-
134
- set_seed(seed)
135
- train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
136
-
137
- # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
138
- model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
139
-
140
- # Instantiate optimizer
141
- optimizer_cls = (
142
- AdamW
143
- if accelerator.state.deepspeed_plugin is None
144
- or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
145
- else DummyOptim
146
- )
147
- optimizer = optimizer_cls(params=model.parameters(), lr=lr)
148
-
149
- if accelerator.state.deepspeed_plugin is not None:
150
- gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
151
- "gradient_accumulation_steps"
152
- ]
153
- else:
154
- gradient_accumulation_steps = 1
155
- max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
156
-
157
- # Instantiate scheduler
158
- if (
159
- accelerator.state.deepspeed_plugin is None
160
- or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
161
- ):
162
- lr_scheduler = get_linear_schedule_with_warmup(
163
- optimizer=optimizer,
164
- num_warmup_steps=0,
165
- num_training_steps=max_training_steps,
166
- )
167
- else:
168
- lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
169
-
170
- # Prepare everything
171
- # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
172
- # prepare method.
173
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
174
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
175
- )
176
-
177
- # We need to keep track of how many total steps we have iterated over
178
- overall_step = 0
179
- # We also need to keep track of the stating epoch so files are named properly
180
- starting_epoch = 0
181
-
182
- # Now we train the model
183
- train_total_peak_memory = {}
184
- for epoch in range(starting_epoch, num_epochs):
185
- with TorchTracemalloc() as tracemalloc:
186
- model.train()
187
- for step, batch in enumerate(train_dataloader):
188
- outputs = model(**batch)
189
- loss = outputs.loss
190
- loss = loss / gradient_accumulation_steps
191
- accelerator.backward(loss)
192
- if step % gradient_accumulation_steps == 0:
193
- optimizer.step()
194
- lr_scheduler.step()
195
- optimizer.zero_grad()
196
-
197
- overall_step += 1
198
-
199
- # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
200
- accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
201
- accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
202
- accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
203
- accelerator.print(
204
- "Total Peak Memory consumed during the train (max): {}".format(
205
- tracemalloc.peaked + b2mb(tracemalloc.begin)
206
- )
207
- )
208
- train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
209
- if args.peak_memory_upper_bound is not None:
210
- assert (
211
- train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
212
- ), "Peak memory usage exceeded the upper bound"
213
-
214
- accelerator.wait_for_everyone()
215
- if accelerator.is_main_process:
216
- with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
217
- json.dump(train_total_peak_memory, f)
218
-
219
-
220
- def main():
221
- parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
222
- parser.add_argument(
223
- "--model_name_or_path",
224
- type=str,
225
- default="bert-base-cased",
226
- help="Path to pretrained model or model identifier from huggingface.co/models.",
227
- required=False,
228
- )
229
- parser.add_argument(
230
- "--output_dir",
231
- type=str,
232
- default=".",
233
- help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
234
- )
235
- parser.add_argument(
236
- "--peak_memory_upper_bound",
237
- type=float,
238
- default=None,
239
- help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
240
- )
241
- parser.add_argument(
242
- "--n_train",
243
- type=int,
244
- default=320,
245
- help="Number of training examples to use.",
246
- )
247
- parser.add_argument(
248
- "--n_val",
249
- type=int,
250
- default=160,
251
- help="Number of validation examples to use.",
252
- )
253
- parser.add_argument(
254
- "--num_epochs",
255
- type=int,
256
- default=1,
257
- help="Number of train epochs.",
258
- )
259
- args = parser.parse_args()
260
- config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
261
- training_function(config, args)
262
-
263
-
264
- if __name__ == "__main__":
265
- main()
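The `TorchTracemalloc` context manager above generalizes across CUDA, NPU, and XPU devices; stripped to the CUDA-only case it is just a before/after reading around the peak-allocation gauge. A minimal sketch under that assumption (a CUDA device must be available; `reset_peak_memory_stats` is the current name for the older `reset_max_memory_allocated`):

    import gc
    import torch

    def b2mb(x):  # bytes -> MiB, same helper as in the script above
        return int(x / 2**20)

    class PeakMemoryTracker:
        def __enter__(self):
            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()  # zero the peak gauge
            self.begin = torch.cuda.memory_allocated()
            return self

        def __exit__(self, *exc):
            gc.collect()
            torch.cuda.empty_cache()
            self.used = b2mb(torch.cuda.memory_allocated() - self.begin)
            self.peaked = b2mb(torch.cuda.max_memory_allocated() - self.begin)

    with PeakMemoryTracker() as mem:
        x = torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    print(f"delta used/peak: {mem.used}/{mem.peaked} MiB")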
src/test_utils/scripts/external_deps/test_performance.py DELETED
@@ -1,219 +0,0 @@
1
- # coding=utf-8
2
-
3
- import argparse
4
- import json
5
- import os
6
-
7
- import evaluate
8
- import torch
9
- from datasets import load_dataset
10
- from torch.optim import AdamW
11
- from torch.utils.data import DataLoader
12
- from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
13
-
14
- from accelerate import Accelerator, DistributedType
15
- from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
16
-
17
-
18
- MAX_GPU_BATCH_SIZE = 16
19
- EVAL_BATCH_SIZE = 32
20
-
21
-
22
- def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
23
- """
24
- Creates a set of `DataLoader`s for the `glue` dataset.
25
-
26
- Args:
27
- accelerator (`Accelerator`):
28
- An `Accelerator` object
29
- batch_size (`int`, *optional*):
30
- The batch size for the train and validation DataLoaders.
31
- model_name (`str`, *optional*):
32
- """
33
- tokenizer = AutoTokenizer.from_pretrained(model_name)
34
- datasets = load_dataset("glue", "mrpc")
35
-
36
- def tokenize_function(examples):
37
- # max_length=None => use the model max length (it's actually the default)
38
- outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
39
- return outputs
40
-
41
- # Apply the method we just defined to all the examples in all the splits of the dataset
42
- tokenized_datasets = datasets.map(
43
- tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
44
- )
45
-
46
- # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
47
- # transformers library
48
- tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
49
-
50
- def collate_fn(examples):
51
- # On TPU it's best to pad everything to the same length or training will be very slow.
52
- if accelerator.distributed_type == DistributedType.TPU:
53
- return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
54
- return tokenizer.pad(examples, padding="longest", return_tensors="pt")
55
-
56
- # Instantiate dataloaders.
57
- train_dataloader = DataLoader(
58
- tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
59
- )
60
- eval_dataloader = DataLoader(
61
- tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
62
- )
63
-
64
- return train_dataloader, eval_dataloader
65
-
66
-
67
- def training_function(config, args):
68
- # Initialize accelerator
69
- accelerator = Accelerator()
70
-
71
- # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
72
- lr = config["lr"]
73
- num_epochs = int(config["num_epochs"])
74
- seed = int(config["seed"])
75
- batch_size = int(config["batch_size"])
76
- model_name = args.model_name_or_path
77
-
78
- set_seed(seed)
79
- train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
80
-
81
- # Instantiate the model (we build the model here so that the seed also control new weights initialization)
82
- model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
83
-
84
- # Instantiate optimizer
85
- optimizer_cls = (
86
- AdamW
87
- if accelerator.state.deepspeed_plugin is None
88
- or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
89
- else DummyOptim
90
- )
91
- optimizer = optimizer_cls(params=model.parameters(), lr=lr)
92
-
93
- if accelerator.state.deepspeed_plugin is not None:
94
- gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
95
- "gradient_accumulation_steps"
96
- ]
97
- else:
98
- gradient_accumulation_steps = 1
99
- max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
100
-
101
- # Instantiate scheduler
102
- if (
103
- accelerator.state.deepspeed_plugin is None
104
- or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
105
- ):
106
- lr_scheduler = get_linear_schedule_with_warmup(
107
- optimizer=optimizer,
108
- num_warmup_steps=0,
109
- num_training_steps=max_training_steps,
110
- )
111
- else:
112
- lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
113
-
114
- # Prepare everything
115
- # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
116
- # prepare method.
117
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
118
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
119
- )
120
-
121
- # We need to keep track of how many total steps we have iterated over
122
- overall_step = 0
123
- # We also need to keep track of the stating epoch so files are named properly
124
- starting_epoch = 0
125
-
126
- # Now we train the model
127
- metric = evaluate.load("glue", "mrpc")
128
- best_performance = 0
129
- performance_metric = {}
130
- for epoch in range(starting_epoch, num_epochs):
131
- model.train()
132
- for step, batch in enumerate(train_dataloader):
133
- outputs = model(**batch)
134
- loss = outputs.loss
135
- loss = loss / gradient_accumulation_steps
136
- accelerator.backward(loss)
137
- if step % gradient_accumulation_steps == 0:
138
- optimizer.step()
139
- lr_scheduler.step()
140
- optimizer.zero_grad()
141
-
142
- overall_step += 1
143
-
144
- model.eval()
145
- samples_seen = 0
146
- for step, batch in enumerate(eval_dataloader):
147
- # We could avoid this line since we set the accelerator with `device_placement=True`.
148
- batch.to(accelerator.device)
149
- with torch.no_grad():
150
- outputs = model(**batch)
151
- predictions = outputs.logits.argmax(dim=-1)
152
- # It is slightly faster to call this once, than multiple times
153
- predictions, references = accelerator.gather(
154
- (predictions, batch["labels"])
155
- ) # If we are in a multiprocess environment, the last batch has duplicates
156
- if accelerator.use_distributed:
157
- if step == len(eval_dataloader) - 1:
158
- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
159
- references = references[: len(eval_dataloader.dataset) - samples_seen]
160
- else:
161
- samples_seen += references.shape[0]
162
- metric.add_batch(
163
- predictions=predictions,
164
- references=references,
165
- )
166
-
167
- eval_metric = metric.compute()
168
- # Use accelerator.print to print only on the main process.
169
- accelerator.print(f"epoch {epoch}:", eval_metric)
170
- performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
171
-
172
- if best_performance < eval_metric["accuracy"]:
173
- best_performance = eval_metric["accuracy"]
174
-
175
- if args.performance_lower_bound is not None:
176
- assert (
177
- args.performance_lower_bound <= best_performance
178
- ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
179
-
180
- accelerator.wait_for_everyone()
181
- if accelerator.is_main_process:
182
- with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
183
- json.dump(performance_metric, f)
184
-
185
-
186
- def main():
187
- parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance.")
188
- parser.add_argument(
189
- "--model_name_or_path",
190
- type=str,
191
- default="bert-base-cased",
192
- help="Path to pretrained model or model identifier from huggingface.co/models.",
193
- required=False,
194
- )
195
- parser.add_argument(
196
- "--output_dir",
197
- type=str,
198
- default=".",
199
- help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
200
- )
201
- parser.add_argument(
202
- "--performance_lower_bound",
203
- type=float,
204
- default=None,
205
- help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
206
- )
207
- parser.add_argument(
208
- "--num_epochs",
209
- type=int,
210
- default=3,
211
- help="Number of train epochs.",
212
- )
213
- args = parser.parse_args()
214
- config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
215
- training_function(config, args)
216
-
217
-
218
- if __name__ == "__main__":
219
- main()
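The optimizer and scheduler selection above is the standard DeepSpeed pattern: when the DeepSpeed config already defines an optimizer or scheduler, accelerate expects placeholder objects rather than real ones. A condensed sketch of that branch, assuming the same accelerate version as the scripts above; the toy model is illustrative:

    import torch
    from torch.optim import AdamW
    from accelerate import Accelerator
    from accelerate.utils.deepspeed import DummyOptim

    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    ds_plugin = accelerator.state.deepspeed_plugin
    # Use a real AdamW unless the DeepSpeed config owns the optimizer, in
    # which case a DummyOptim placeholder is handed to prepare() instead.
    use_dummy = ds_plugin is not None and "optimizer" in ds_plugin.deepspeed_config
    optimizer_cls = DummyOptim if use_dummy else AdamW
    optimizer = optimizer_cls(params=model.parameters(), lr=2e-5)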
src/test_utils/scripts/test_cli.py DELETED
@@ -1,13 +0,0 @@
1
- import torch
2
-
3
-
4
- def main():
5
- if torch.cuda.is_available():
6
- num_gpus = torch.cuda.device_count()
7
- else:
8
- num_gpus = 0
9
- print(f"Successfully ran on {num_gpus} GPUs")
10
-
11
-
12
- if __name__ == "__main__":
13
- main()
src/test_utils/scripts/test_distributed_data_loop.py DELETED
@@ -1,226 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
-
4
-
5
-
6
- import warnings
7
- from typing import List
8
- from unittest.mock import Mock
9
-
10
- import torch
11
- from torch.utils.data import DataLoader, IterableDataset, TensorDataset
12
-
13
- from accelerate.accelerator import Accelerator
14
- from accelerate.utils.dataclasses import DistributedType
15
-
16
-
17
- class DummyIterableDataset(IterableDataset):
18
- def __init__(self, data):
19
- self.data = data
20
-
21
- def __iter__(self):
22
- for element in self.data:
23
- yield element
24
-
25
-
26
- def create_accelerator(even_batches=True):
27
- accelerator = Accelerator(even_batches=even_batches)
28
- assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
29
- return accelerator
30
-
31
-
32
- def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
33
- """
34
- Create a simple DataLoader to use during the test cases
35
- """
36
- if iterable:
37
- dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
38
- else:
39
- dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
40
-
41
- dl = DataLoader(dataset, batch_size=batch_size)
42
- dl = accelerator.prepare(dl)
43
-
44
- return dl
45
-
46
-
47
- def verify_dataloader_batch_sizes(
48
- accelerator: Accelerator,
49
- dataset_size: int,
50
- batch_size: int,
51
- process_0_expected_batch_sizes: List[int],
52
- process_1_expected_batch_sizes: List[int],
53
- ):
54
- """
55
- A helper function for verifying the batch sizes coming from a prepared dataloader in each process
56
- """
57
- dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
58
-
59
- batch_sizes = [len(batch[0]) for batch in dl]
60
-
61
- if accelerator.process_index == 0:
62
- assert batch_sizes == process_0_expected_batch_sizes
63
- elif accelerator.process_index == 1:
64
- assert batch_sizes == process_1_expected_batch_sizes
65
-
66
-
67
- def test_default_ensures_even_batch_sizes():
68
- accelerator = create_accelerator()
69
-
70
- # without padding, we would expect a different number of batches
71
- verify_dataloader_batch_sizes(
72
- accelerator,
73
- dataset_size=3,
74
- batch_size=1,
75
- process_0_expected_batch_sizes=[1, 1],
76
- process_1_expected_batch_sizes=[1, 1],
77
- )
78
-
79
- # without padding, we would expect the same number of batches, but different sizes
80
- verify_dataloader_batch_sizes(
81
- accelerator,
82
- dataset_size=7,
83
- batch_size=2,
84
- process_0_expected_batch_sizes=[2, 2],
85
- process_1_expected_batch_sizes=[2, 2],
86
- )
87
-
88
-
89
- def test_can_disable_even_batches():
90
- accelerator = create_accelerator(even_batches=False)
91
-
92
- verify_dataloader_batch_sizes(
93
- accelerator,
94
- dataset_size=3,
95
- batch_size=1,
96
- process_0_expected_batch_sizes=[1, 1],
97
- process_1_expected_batch_sizes=[1],
98
- )
99
-
100
- verify_dataloader_batch_sizes(
101
- accelerator,
102
- dataset_size=7,
103
- batch_size=2,
104
- process_0_expected_batch_sizes=[2, 2],
105
- process_1_expected_batch_sizes=[2, 1],
106
- )
107
-
108
-
109
- def test_can_join_uneven_inputs():
110
- accelerator = create_accelerator(even_batches=False)
111
-
112
- model = torch.nn.Linear(1, 1)
113
- ddp_model = accelerator.prepare(model)
114
-
115
- dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
116
-
117
- batch_idxs = []
118
- with accelerator.join_uneven_inputs([ddp_model]):
119
- for batch_idx, batch in enumerate(dl):
120
- output = ddp_model(batch[0].float())
121
- loss = output.sum()
122
- loss.backward()
123
- batch_idxs.append(batch_idx)
124
-
125
- accelerator.wait_for_everyone()
126
-
127
- if accelerator.process_index == 0:
128
- assert batch_idxs == [0, 1]
129
- elif accelerator.process_index == 1:
130
- assert batch_idxs == [0]
131
-
132
-
133
- def test_join_raises_warning_for_non_ddp_distributed(accelerator):
134
- with warnings.catch_warnings(record=True) as w:
135
- with accelerator.join_uneven_inputs([Mock()]):
136
- pass
137
-
138
- assert issubclass(w[-1].category, UserWarning)
139
- assert "only supported for multi-GPU" in str(w[-1].message)
140
-
141
-
142
- def test_join_can_override_even_batches():
143
- default_even_batches = True
144
- overridden_even_batches = False
145
- accelerator = create_accelerator(even_batches=default_even_batches)
146
- model = torch.nn.Linear(1, 1)
147
- ddp_model = accelerator.prepare(model)
148
- train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
149
- valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
150
-
151
- with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
152
- train_dl_overridden_value = train_dl.batch_sampler.even_batches
153
- valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
154
-
155
- assert train_dl_overridden_value == overridden_even_batches
156
- assert valid_dl_overridden_value == overridden_even_batches
157
- assert train_dl.batch_sampler.even_batches == default_even_batches
158
- assert valid_dl.batch_sampler.even_batches == default_even_batches
159
-
160
-
161
- def test_join_can_override_for_mixed_type_dataloaders():
162
- default_even_batches = True
163
- overridden_even_batches = False
164
- accelerator = create_accelerator(even_batches=default_even_batches)
165
- model = torch.nn.Linear(1, 1)
166
- ddp_model = accelerator.prepare(model)
167
- create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
168
- batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
169
-
170
- with warnings.catch_warnings():
171
- warnings.filterwarnings("ignore")
172
- try:
173
- with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
174
- batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
175
- except AttributeError:
176
- # ensure attribute error is not raised when processing iterable dl
177
- raise AssertionError
178
-
179
- assert batch_dl_overridden_value == overridden_even_batches
180
- assert batch_dl.batch_sampler.even_batches == default_even_batches
181
-
182
-
183
- def test_join_raises_warning_for_iterable_when_overriding_even_batches():
184
- accelerator = create_accelerator()
185
- model = torch.nn.Linear(1, 1)
186
- ddp_model = accelerator.prepare(model)
187
- create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
188
-
189
- with warnings.catch_warnings(record=True) as w:
190
- with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
191
- pass
192
-
193
- assert issubclass(w[-1].category, UserWarning)
194
- assert "only supported for map-style datasets" in str(w[-1].message)
195
-
196
-
197
- def main():
198
- accelerator = create_accelerator()
199
-
200
- accelerator.print("Test that even_batches variable ensures uniform batches across processes")
201
- test_default_ensures_even_batch_sizes()
202
-
203
- accelerator.print("Run tests with even_batches disabled")
204
- test_can_disable_even_batches()
205
-
206
- accelerator.print("Test joining uneven inputs")
207
- test_can_join_uneven_inputs()
208
-
209
- accelerator.print("Test overriding even_batches when joining uneven inputs")
210
- test_join_can_override_even_batches()
211
-
212
- accelerator.print("Test overriding even_batches for mixed dataloader types")
213
- test_join_can_override_for_mixed_type_dataloaders()
214
-
215
- accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
216
- test_join_raises_warning_for_iterable_when_overriding_even_batches()
217
-
218
- accelerator.print("Test join with non DDP distributed raises warning")
219
- original_state = accelerator.state.distributed_type
220
- accelerator.state.distributed_type = DistributedType.FSDP
221
- test_join_raises_warning_for_non_ddp_distributed(accelerator)
222
- accelerator.state.distributed_type = original_state
223
-
224
-
225
- if __name__ == "__main__":
226
- main()
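Two of the behaviors tested above, `even_batches=False` and `join_uneven_inputs`, go together: once ranks can receive different numbers of batches, DDP gradient syncs must be joined so the rank with fewer batches does not deadlock the others. A minimal sketch, assuming a 2-process run started with `accelerate launch`:

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from accelerate import Accelerator

    accelerator = Accelerator(even_batches=False)
    model = accelerator.prepare(torch.nn.Linear(1, 1))
    # 3 samples across 2 processes: one rank gets 2 batches, the other gets 1.
    dataloader = accelerator.prepare(DataLoader(TensorDataset(torch.randn(3, 1)), batch_size=1))

    with accelerator.join_uneven_inputs([model]):
        for (batch,) in dataloader:
            model(batch).sum().backward()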
src/test_utils/scripts/test_notebook.py DELETED
@@ -1,40 +0,0 @@
1
- # Test file to ensure that certain situational setups for notebooks work in general.
2
- import os
3
-
4
- from pytest import raises
5
-
6
- from accelerate import PartialState, notebook_launcher
7
- from accelerate.test_utils import require_bnb
8
- from accelerate.utils import is_bnb_available
9
-
10
-
11
- def basic_function():
12
- # Just prints the PartialState
13
- print(f"PartialState:\n{PartialState()}")
14
-
15
-
16
- NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1))
17
-
18
-
19
- def test_can_initialize():
20
- notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)
21
-
22
-
23
- @require_bnb
24
- def test_problematic_imports():
25
- with raises(RuntimeError, match="Please keep these imports"):
26
- import bitsandbytes as bnb # noqa: F401
27
-
28
- notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)
29
-
30
-
31
- def main():
32
- print("Test basic notebook can be ran")
33
- test_can_initialize()
34
- if is_bnb_available():
35
- print("Test problematic imports (bnb)")
36
- test_problematic_imports()
37
-
38
-
39
- if __name__ == "__main__":
40
- main()
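For context, `notebook_launcher` spawns worker processes for a function from an already-running kernel, which is why the test above guards against premature CUDA-initializing imports such as bitsandbytes. A minimal usage sketch; the function and its arguments are illustrative:

    from accelerate import notebook_launcher

    def train(lr, num_epochs):
        # real training code would go here
        print(f"training with lr={lr} for {num_epochs} epochs")

    # Spawns num_processes copies of `train`; CUDA must not already be
    # initialized in the notebook process when this is called.
    notebook_launcher(train, args=(2e-5, 3), num_processes=2)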
src/test_utils/scripts/test_ops.py DELETED
@@ -1,147 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
-
4
-
5
- import torch
6
-
7
- from accelerate import PartialState
8
- from accelerate.test_utils.testing import assert_exception
9
- from accelerate.utils.dataclasses import DistributedType
10
- from accelerate.utils.operations import (
11
- DistributedOperationException,
12
- broadcast,
13
- gather,
14
- gather_object,
15
- pad_across_processes,
16
- reduce,
17
- )
18
-
19
-
20
- def create_tensor(state):
21
- return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
22
-
23
-
24
- def test_gather(state):
25
- tensor = create_tensor(state)
26
- gathered_tensor = gather(tensor)
27
- assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
28
-
29
-
30
- def test_gather_object(state):
31
- obj = [state.process_index]
32
- gathered_obj = gather_object(obj)
33
- assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
34
- assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
35
-
36
-
37
- def test_gather_non_contiguous(state):
38
- # Create a non-contiguous tensor
39
- tensor = torch.arange(12).view(4, 3).t().to(state.device)
40
- assert not tensor.is_contiguous()
41
- # Shouldn't error out
42
- _ = gather(tensor)
43
-
44
-
45
- def test_broadcast(state):
46
- tensor = create_tensor(state)
47
- broadcasted_tensor = broadcast(tensor)
48
- assert broadcasted_tensor.shape == torch.Size([state.num_processes])
49
- assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
50
-
51
-
52
- def test_pad_across_processes(state):
53
- # We need to pad the tensor with one more element if we are the main process
54
- # to ensure that we can pad
55
- if state.is_main_process:
56
- tensor = torch.arange(state.num_processes + 1).to(state.device)
57
- else:
58
- tensor = torch.arange(state.num_processes).to(state.device)
59
- padded_tensor = pad_across_processes(tensor)
60
- assert padded_tensor.shape == torch.Size([state.num_processes + 1])
61
- if not state.is_main_process:
62
- assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
63
-
64
-
65
- def test_reduce_sum(state):
66
- # For now runs on only two processes
67
- if state.num_processes != 2:
68
- return
69
- tensor = create_tensor(state)
70
- reduced_tensor = reduce(tensor, "sum")
71
- truth_tensor = torch.tensor([4.0, 6]).to(state.device)
72
- assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
73
-
74
-
75
- def test_reduce_mean(state):
76
- # For now runs on only two processes
77
- if state.num_processes != 2:
78
- return
79
- tensor = create_tensor(state)
80
- reduced_tensor = reduce(tensor, "mean")
81
- truth_tensor = torch.tensor([2.0, 3]).to(state.device)
82
- assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
83
-
84
-
85
- def test_op_checker(state):
86
- # Must be in a distributed state
87
- if state.distributed_type == DistributedType.NO:
88
- return
89
- state.debug = True
90
- # `pad_across_processes`
91
- if state.process_index == 0:
92
- data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
93
- else:
94
- data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)}
95
-
96
- with assert_exception(DistributedOperationException):
97
- pad_across_processes(data, dim=0)
98
-
99
- # `reduce`
100
- if state.process_index == 0:
101
- data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
102
- else:
103
- data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)}
104
-
105
- with assert_exception(DistributedOperationException):
106
- reduce(data)
107
-
108
- # `broadcast`
109
- if state.process_index == 0:
110
- data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
111
- else:
112
- data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)}
113
-
114
- with assert_exception(DistributedOperationException):
115
- broadcast(data)
116
-
117
- state.debug = False
118
-
119
-
120
- def _mp_fn(index):
121
- # For xla_spawn (TPUs)
122
- main()
123
-
124
-
125
- def main():
126
- state = PartialState()
127
- state.print(f"State: {state}")
128
- state.print("testing gather")
129
- test_gather(state)
130
- state.print("testing gather_object")
131
- test_gather_object(state)
132
- state.print("testing gather non-contigous")
133
- test_gather_non_contigous(state)
134
- state.print("testing broadcast")
135
- test_broadcast(state)
136
- state.print("testing pad_across_processes")
137
- test_pad_across_processes(state)
138
- state.print("testing reduce_sum")
139
- test_reduce_sum(state)
140
- state.print("testing reduce_mean")
141
- test_reduce_mean(state)
142
- state.print("testing op_checker")
143
- test_op_checker(state)
144
-
145
-
146
- if __name__ == "__main__":
147
- main()
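The collective ops exercised above come from `accelerate.utils.operations`. A minimal sketch of the two most common ones, assuming a multi-process launch (on a single process both calls simply return the input):

    import torch
    from accelerate import PartialState
    from accelerate.utils.operations import broadcast, gather

    state = PartialState()
    tensor = torch.tensor([float(state.process_index)], device=state.device)
    # gather concatenates every rank's tensor along dim 0;
    # broadcast overwrites every rank's tensor with rank 0's copy.
    print("gathered:", gather(tensor).tolist())
    print("broadcast:", broadcast(tensor).tolist())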
src/test_utils/scripts/test_script.py DELETED
@@ -1,660 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
-
4
-
5
- import contextlib
6
- import io
7
- import math
8
- import time
9
- from copy import deepcopy
10
- from pathlib import Path
11
-
12
- import numpy as np
13
- import torch
14
- from torch.utils.data import DataLoader, Dataset
15
-
16
- from accelerate import Accelerator
17
- from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader
18
- from accelerate.state import AcceleratorState
19
- from accelerate.test_utils import RegressionDataset, are_the_same_tensors
20
- from accelerate.utils import (
21
- DistributedType,
22
- gather,
23
- is_bf16_available,
24
- is_ipex_available,
25
- is_npu_available,
26
- is_xpu_available,
27
- set_seed,
28
- synchronize_rng_states,
29
- )
30
-
31
-
32
- # TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting.
33
- if is_xpu_available():
34
- from accelerate.test_utils import RegressionModel4XPU as RegressionModel
35
- else:
36
- from accelerate.test_utils import RegressionModel
37
-
38
-
39
- def print_main(state):
40
- print(f"Printing from the main process {state.process_index}")
41
-
42
-
43
- def print_local_main(state):
44
- print(f"Printing from the local main process {state.local_process_index}")
45
-
46
-
47
- def print_last(state):
48
- print(f"Printing from the last process {state.process_index}")
49
-
50
-
51
- def print_on(state, process_idx):
52
- print(f"Printing from process {process_idx}: {state.process_index}")
53
-
54
-
55
- def process_execution_check():
56
- accelerator = Accelerator()
57
- num_processes = accelerator.num_processes
58
- # Test main_process_first context manager
59
- path = Path("check_main_process_first.txt")
60
- with accelerator.main_process_first():
61
- if accelerator.is_main_process:
62
- time.sleep(0.1) # ensure main process takes longest
63
- with open(path, "a+") as f:
64
- f.write("Currently in the main process\n")
65
- else:
66
- with open(path, "a+") as f:
67
- f.write("Now on another process\n")
68
- accelerator.wait_for_everyone()
69
-
70
- if accelerator.is_main_process:
71
- with open(path, "r") as f:
72
- text = "".join(f.readlines())
73
- try:
74
- assert text.startswith("Currently in the main process\n"), "Main process was not first"
75
- if num_processes > 1:
76
- assert text.endswith("Now on another process\n"), "Main process was not first"
77
- assert (
78
- text.count("Now on another process\n") == accelerator.num_processes - 1
79
- ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}"
80
- except AssertionError:
81
- path.unlink()
82
- raise
83
-
84
- if accelerator.is_main_process and path.exists():
85
- path.unlink()
86
- accelerator.wait_for_everyone()
87
- # Test the decorators
88
- f = io.StringIO()
89
- with contextlib.redirect_stdout(f):
90
- accelerator.on_main_process(print_main)(accelerator.state)
91
- result = f.getvalue().rstrip()
92
- if accelerator.is_main_process:
93
- assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0"
94
- else:
95
- assert f.getvalue().rstrip() == "", f'{result} != ""'
96
- f.truncate(0)
97
- f.seek(0)
98
-
99
- with contextlib.redirect_stdout(f):
100
- accelerator.on_local_main_process(print_local_main)(accelerator.state)
101
- if accelerator.is_local_main_process:
102
- assert f.getvalue().rstrip() == "Printing from the local main process 0"
103
- else:
104
- assert f.getvalue().rstrip() == ""
105
- f.truncate(0)
106
- f.seek(0)
107
-
108
- with contextlib.redirect_stdout(f):
109
- accelerator.on_last_process(print_last)(accelerator.state)
110
- if accelerator.is_last_process:
111
- assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}"
112
- else:
113
- assert f.getvalue().rstrip() == ""
114
- f.truncate(0)
115
- f.seek(0)
116
-
117
- for process_idx in range(num_processes):
118
- with contextlib.redirect_stdout(f):
119
- accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx)
120
- if accelerator.process_index == process_idx:
121
- assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}"
122
- else:
123
- assert f.getvalue().rstrip() == ""
124
- f.truncate(0)
125
- f.seek(0)
126
-
127
-
128
- def init_state_check():
129
- # Test we can instantiate this twice in a row.
130
- state = AcceleratorState()
131
- if state.local_process_index == 0:
132
- print("Testing, testing. 1, 2, 3.")
133
- print(state)
134
-
135
-
136
- def rng_sync_check():
137
- state = AcceleratorState()
138
- synchronize_rng_states(["torch"])
139
- assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU."
140
- if state.distributed_type == DistributedType.MULTI_GPU:
141
- synchronize_rng_states(["cuda"])
142
- assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU."
143
- elif state.distributed_type == DistributedType.MULTI_XPU:
144
- synchronize_rng_states(["xpu"])
145
- assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU."
146
- generator = torch.Generator()
147
- synchronize_rng_states(["generator"], generator=generator)
148
- assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator."
149
-
150
- if state.local_process_index == 0:
151
- print("All rng are properly synched.")
152
-
153
-
154
- def dl_preparation_check():
155
- state = AcceleratorState()
156
- length = 32 * state.num_processes
157
-
158
- dl = DataLoader(range(length), batch_size=8)
159
- dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True)
160
- result = []
161
- for batch in dl:
162
- result.append(gather(batch))
163
- result = torch.cat(result)
164
-
165
- print(state.process_index, result, type(dl))
166
- assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
167
-
168
- dl = DataLoader(range(length), batch_size=8)
169
- dl = prepare_data_loader(
170
- dl,
171
- state.device,
172
- state.num_processes,
173
- state.process_index,
174
- put_on_device=True,
175
- split_batches=True,
176
- )
177
- result = []
178
- for batch in dl:
179
- result.append(gather(batch))
180
- result = torch.cat(result)
181
- assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
182
-
183
- if state.process_index == 0:
184
- print("Non-shuffled dataloader passing.")
185
-
186
-     dl = DataLoader(range(length), batch_size=8, shuffle=True)
-     dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True)
-     result = []
-     for batch in dl:
-         result.append(gather(batch))
-     result = torch.cat(result).tolist()
-     result.sort()
-     assert result == list(range(length)), "Wrong shuffled dataloader result."
-
-     dl = DataLoader(range(length), batch_size=8, shuffle=True)
-     dl = prepare_data_loader(
-         dl,
-         state.device,
-         state.num_processes,
-         state.process_index,
-         put_on_device=True,
-         split_batches=True,
-     )
-     result = []
-     for batch in dl:
-         result.append(gather(batch))
-     result = torch.cat(result).tolist()
-     result.sort()
-     assert result == list(range(length)), "Wrong shuffled dataloader result."
-
-     if state.local_process_index == 0:
-         print("Shuffled dataloader passing.")
-
-
- def central_dl_preparation_check():
-     state = AcceleratorState()
-     length = 32 * state.num_processes
-
-     dl = DataLoader(range(length), batch_size=8)
-     dl = prepare_data_loader(
-         dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True
-     )
-     result = []
-     for batch in dl:
-         result.append(gather(batch))
-     result = torch.cat(result)
-     assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
-
-     dl = DataLoader(range(length), batch_size=8)
-     dl = prepare_data_loader(
-         dl,
-         state.device,
-         state.num_processes,
-         state.process_index,
-         put_on_device=True,
-         split_batches=True,
-         dispatch_batches=True,
-     )
-     result = []
-     for batch in dl:
-         result.append(gather(batch))
-     result = torch.cat(result)
-     assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
-
-     if state.process_index == 0:
-         print("Non-shuffled central dataloader passing.")
-
-     dl = DataLoader(range(length), batch_size=8, shuffle=True)
-     dl = prepare_data_loader(
-         dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True
-     )
-     result = []
-     for batch in dl:
-         result.append(gather(batch))
-     result = torch.cat(result).tolist()
-     result.sort()
-     assert result == list(range(length)), "Wrong shuffled dataloader result."
-
-     dl = DataLoader(range(length), batch_size=8, shuffle=True)
-     dl = prepare_data_loader(
-         dl,
-         state.device,
-         state.num_processes,
-         state.process_index,
-         put_on_device=True,
-         split_batches=True,
-         dispatch_batches=True,
-     )
-     result = []
-     for batch in dl:
-         result.append(gather(batch))
-     result = torch.cat(result).tolist()
-     result.sort()
-     assert result == list(range(length)), "Wrong shuffled dataloader result."
-
-     if state.local_process_index == 0:
-         print("Shuffled central dataloader passing.")
-
-
- def custom_sampler_check():
-     state = AcceleratorState()
-
-     class CustomDataset(Dataset):
-         def __init__(self, data):
-             self.data = data
-
-         def __len__(self):
-             return len(self.data)
-
-         def __getitem__(self, index):
-             return self.data[index]
-
-     class CustomBatchSampler:
-         def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True):
-             self.batch_size = batch_size
-             self.data_index = np.arange(dataset_length)
-             self.shuffle = shuffle
-
-         def __iter__(self):
-             num_batches = len(self)
-             if self.shuffle:
-                 index = np.random.permutation(self.data_index)
-             else:
-                 index = self.data_index
-             output = np.array_split(index, num_batches)
-             yield from output
-
-         def __len__(self):
-             return math.ceil(len(self.data_index) / self.batch_size)
-
-     dataset = CustomDataset(range(32 * state.num_processes))
-     sampler = CustomBatchSampler(len(dataset), batch_size=8)
-     dl = DataLoader(dataset, batch_sampler=sampler)
-     dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index)
-     # We just need to ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler`) is indeed the old batch sampler
-     if hasattr(dl.batch_sampler, "batch_sampler"):
-         assert isinstance(
-             dl.batch_sampler.batch_sampler, CustomBatchSampler
-         ), "Custom sampler was changed after calling `prepare_data_loader`"
-     else:
-         assert isinstance(
-             dl.batch_sampler, CustomBatchSampler
-         ), "Custom sampler was changed after calling `prepare_data_loader`"
-
-
- def mock_training(length, batch_size, generator):
-     set_seed(42)
-     generator.manual_seed(42)
-     train_set = RegressionDataset(length=length, seed=42)
-
-     # The SeedableRandomSampler is needed during distributed setups
-     # for full reproducibility across processes with the `DataLoader`
-     sampler = SeedableRandomSampler(
-         generator=generator,
-         data_source=train_set,
-         num_samples=len(train_set),
-     )
-     train_dl = DataLoader(train_set, batch_size=batch_size, sampler=sampler)
-     model = RegressionModel()
-     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-     for epoch in range(3):
-         for batch in train_dl:
-             model.zero_grad()
-             output = model(batch["x"])
-             loss = torch.nn.functional.mse_loss(output, batch["y"])
-             loss.backward()
-             optimizer.step()
-     return train_set, model
-
-
- def training_check():
-     state = AcceleratorState()
-     generator = torch.Generator()
-     batch_size = 8
-     length = batch_size * 4 * state.num_processes
-
-     train_set, old_model = mock_training(length, batch_size * state.num_processes, generator)
-     assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes."
-     assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes."
-
-     accelerator = Accelerator()
-     train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
-     model = RegressionModel()
-     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-
-     train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
-     set_seed(42)
-     generator.manual_seed(42)
-     for epoch in range(3):
-         for batch in train_dl:
-             model.zero_grad()
-             output = model(batch["x"])
-             loss = torch.nn.functional.mse_loss(output, batch["y"])
-             accelerator.backward(loss)
-             optimizer.step()
-
-     model = accelerator.unwrap_model(model).cpu()
-     assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
-     assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
-
-     accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.")
-
-     accelerator = Accelerator(split_batches=True)
-     train_dl = DataLoader(train_set, batch_size=batch_size * state.num_processes, shuffle=True, generator=generator)
-     model = RegressionModel()
-     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-
-     train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
-     set_seed(42)
-     generator.manual_seed(42)
-     for _ in range(3):
-         for batch in train_dl:
-             model.zero_grad()
-             output = model(batch["x"])
-             loss = torch.nn.functional.mse_loss(output, batch["y"])
-             accelerator.backward(loss)
-             optimizer.step()
-
-     model = accelerator.unwrap_model(model).cpu()
-     assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
-     assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
-
-     accelerator.print("Training yielded the same results on one CPU or distributed setup with batch split.")
-
-     if torch.cuda.is_available() or is_npu_available():
-         # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16
-         print("FP16 training check.")
-         AcceleratorState._reset_state()
-         accelerator = Accelerator(mixed_precision="fp16")
-         train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
-         model = RegressionModel()
-         optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-
-         train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
-         set_seed(42)
-         generator.manual_seed(42)
-         for _ in range(3):
-             for batch in train_dl:
-                 model.zero_grad()
-                 output = model(batch["x"])
-                 loss = torch.nn.functional.mse_loss(output, batch["y"])
-                 accelerator.backward(loss)
-                 optimizer.step()
-
-         model = accelerator.unwrap_model(model).cpu()
-         assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
-         assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
-
-     if torch.cuda.is_available():
-         # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True)
-         print("Keep fp32 wrapper check.")
-         AcceleratorState._reset_state()
-         accelerator = Accelerator(mixed_precision="fp16")
-
-         model = torch.nn.Linear(2, 4)
-         model = accelerator.prepare(model)
-         model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True)
-
-         # Run forward with fp16 as input.
-         # When the model is wrapped with the mixed precision wrapper, no error will be raised.
-         input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device)
-         output = model_with_fp32_wrapper(input_tensor)
-
-     # BF16 support is only for CPU + TPU, and some GPUs
-     if is_bf16_available():
-         # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16
-         print("BF16 training check.")
-         AcceleratorState._reset_state()
-         accelerator = Accelerator(mixed_precision="bf16")
-         train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
-         model = RegressionModel()
-         optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-
-         train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
-         set_seed(42)
-         generator.manual_seed(42)
-         for _ in range(3):
-             for batch in train_dl:
-                 model.zero_grad()
-                 output = model(batch["x"])
-                 loss = torch.nn.functional.mse_loss(output, batch["y"])
-                 accelerator.backward(loss)
-                 optimizer.step()
-
-         model = accelerator.unwrap_model(model).cpu()
-         assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
-         assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
-
-     # IPEX support is only for CPU
-     if is_ipex_available():
-         print("ipex BF16 training check.")
-         AcceleratorState._reset_state()
-         accelerator = Accelerator(mixed_precision="bf16", cpu=True)
-         train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
-         model = RegressionModel()
-         optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-
-         train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
-         set_seed(42)
-         generator.manual_seed(42)
-         for _ in range(3):
-             for batch in train_dl:
-                 model.zero_grad()
-                 output = model(batch["x"])
-                 loss = torch.nn.functional.mse_loss(output, batch["y"])
-                 accelerator.backward(loss)
-                 optimizer.step()
-
-         model = accelerator.unwrap_model(model).cpu()
-         assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
-         assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
-
-     # XPU support is only for XPU
-     if is_xpu_available():
-         print("xpu BF16 training check.")
-         AcceleratorState._reset_state()
-         accelerator = Accelerator(mixed_precision="bf16", cpu=False)
-         train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
-         model = RegressionModel()
-         optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-
-         train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
-         set_seed(42)
-         generator.manual_seed(42)
-         for _ in range(3):
-             for batch in train_dl:
-                 model.zero_grad()
-                 output = model(batch["x"])
-                 loss = torch.nn.functional.mse_loss(output, batch["y"])
-                 accelerator.backward(loss)
-                 optimizer.step()
-
-         model = accelerator.unwrap_model(model).cpu()
-         assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training."
-         assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training."
-
-
- def test_split_between_processes_list():
-     state = AcceleratorState()
-     data = list(range(0, 2 * state.num_processes))
-     with state.split_between_processes(data) as results:
-         assert (
-             len(results) == 2
-         ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}"
-
-     data = list(range(0, (3 * state.num_processes) - 1))
-     with state.split_between_processes(data, apply_padding=True) as results:
-         if state.is_last_process:
-             # Test that the last process gets the extra item(s)
-             num_samples_per_device = math.ceil(len(data) / state.num_processes)
-             assert (
-                 len(results) == num_samples_per_device
-             ), f"Last process did not get the extra item(s). Process index: {state.process_index}; Length: {len(results)}"
-     state.wait_for_everyone()
-
-
- def test_split_between_processes_nested_dict():
-     state = AcceleratorState()
-     a = [1, 2, 3, 4, 5, 6, 7, 8]
-     b = ["a", "b", "c", "d", "e", "f", "g", "h"]
-     c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8])
-     if state.num_processes in (1, 2, 4):
-         data = {"a": a, "b": b, "c": c}
-         data_copy = deepcopy(data)
-         with state.split_between_processes(data) as results:
-             if state.process_index == 0:
-                 assert results["a"] == data_copy["a"][: 8 // state.num_processes]
-             elif state.num_processes == 2:
-                 assert results["a"] == data_copy["a"][4:]
-             elif state.process_index == 3:
-                 # We return a list each time
-                 assert results["a"] == data_copy["a"][-2:], f'Expected: {data_copy["a"][-2:]}, Actual: {results["a"]}'
-             if state.process_index == 0:
-                 assert results["b"] == data_copy["b"][: 8 // state.num_processes]
-             elif state.num_processes == 2:
-                 assert results["b"] == data_copy["b"][4:]
-             elif state.process_index == 3:
-                 assert results["b"] == data_copy["b"][-2:]
-             if state.process_index == 0:
-                 assert torch.allclose(
-                     results["c"], data_copy["c"][: 8 // state.num_processes]
-                 ), f"Did not obtain expected values on process 0, expected `{data['c'][:8 // state.num_processes]}`, received: {results['c']}"
-             elif state.num_processes == 2:
-                 assert torch.allclose(
-                     results["c"], data_copy["c"][4:]
-                 ), f"Did not obtain expected values on process 1, expected `{data['c'][4:]}`, received: {results['c']}"
-             elif state.process_index == 3:
-                 assert torch.allclose(
-                     results["c"], data_copy["c"][-2:]
-                 ), f"Did not obtain expected values on process 3, expected `{data['c'][-2:]}`, received: {results['c']}"
-
-     state.wait_for_everyone()
-
-
- def test_split_between_processes_tensor():
-     state = AcceleratorState()
-     if state.num_processes > 1:
-         data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device)
-         with state.split_between_processes(data) as results:
-             if state.process_index == 0:
-                 assert torch.allclose(results, torch.tensor([0, 1, 2, 3]).to(state.device))
-             else:
-                 assert torch.allclose(results, torch.tensor([4, 5, 6, 7]).to(state.device))
-     state.wait_for_everyone()
-
-
- def test_trigger():
-     accelerator = Accelerator()
-     # should start with being false
-     assert accelerator.check_trigger() is False
-
-     # set a breakpoint on the main process
-     if accelerator.is_main_process:
-         accelerator.set_trigger()
-
-     # check it's been activated across all processes
-     # calls `all_reduce` and triggers a sync
-     assert accelerator.check_trigger() is True
-
-     # check it's been reset after the sync
-     assert accelerator.check_trigger() is False
-
-
- def main():
-     accelerator = Accelerator()
-     state = accelerator.state
-     if state.local_process_index == 0:
-         print("**Initialization**")
-     init_state_check()
-     state.wait_for_everyone()
-
-     if state.distributed_type == DistributedType.MULTI_GPU:
-         num_processes_per_node = torch.cuda.device_count()
-     else:
-         num_processes_per_node = state.num_processes
-
-     # We only run this test on non-multinode setups
-     if num_processes_per_node == state.num_processes:
-         if state.process_index == 0:
-             print("\n**Test process execution**")
-         process_execution_check()
-
-         if state.process_index == 0:
-             print("\n**Test split between processes as a list**")
-         test_split_between_processes_list()
-
-         if state.process_index == 0:
-             print("\n**Test split between processes as a dict**")
-         test_split_between_processes_nested_dict()
-
-         if state.process_index == 0:
-             print("\n**Test split between processes as a tensor**")
-         test_split_between_processes_tensor()
-
-     if state.local_process_index == 0:
-         print("\n**Test random number generator synchronization**")
-     rng_sync_check()
-
-     if state.local_process_index == 0:
-         print("\n**DataLoader integration test**")
-     dl_preparation_check()
-     if state.distributed_type != DistributedType.TPU:
-         central_dl_preparation_check()
-     custom_sampler_check()
-
-     # Trainings are not exactly the same in DeepSpeed and CPU mode
-     if state.distributed_type == DistributedType.DEEPSPEED:
-         return
-
-     if state.local_process_index == 0:
-         print("\n**Training integration test**")
-     training_check()
-
-     if state.local_process_index == 0:
-         print("\n**Breakpoint trigger test**")
-     test_trigger()
-
-
- if __name__ == "__main__":
-     main()
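For context, a minimal sketch of how an end-to-end script like this is typically launched once per process. The import path `accelerate.test_utils.scripts.test_script` is assumed from the pre-removal file location, and the two-process CPU launch is illustrative only:

```python
# Hedged sketch: running the deleted test entry point from Python.
# Assumes an accelerate version that still ships accelerate.test_utils.scripts.test_script.
from accelerate import notebook_launcher
from accelerate.test_utils.scripts import test_script

if __name__ == "__main__":
    # notebook_launcher handles process-group setup/teardown for each worker.
    notebook_launcher(test_script.main, args=(), num_processes=2)
```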
src/test_utils/scripts/test_sync.py DELETED
@@ -1,355 +0,0 @@
-
-
- from copy import deepcopy
-
- import torch
- import torch.nn.functional as F
- from torch.optim import AdamW
- from torch.optim.lr_scheduler import LambdaLR
- from torch.utils.data import DataLoader
-
- from accelerate.accelerator import Accelerator
- from accelerate.state import GradientState
- from accelerate.test_utils import RegressionDataset, RegressionModel
- from accelerate.utils import DistributedType, is_torch_version, set_seed
-
-
- def check_model_parameters(model_a, model_b, did_step, iteration):
-     for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
-         if not param.requires_grad:
-             continue
-         if not did_step:
-             # Grads should not be in sync
-             assert (
-                 torch.allclose(param.grad, grad_param.grad) is False
-             ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
-         else:
-             # Grads should be in sync
-             assert (
-                 torch.allclose(param.grad, grad_param.grad) is True
-             ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
-
-
- def step_model(model, input, target, accelerator, do_backward=True):
-     model.train()
-     output = model(input)
-     loss = F.mse_loss(output, target.to(output.device))
-     if not do_backward:
-         loss /= accelerator.gradient_accumulation_steps
-         loss.backward()
-     else:
-         accelerator.backward(loss)
-
-
- def get_training_setup(accelerator, sched=False):
-     "Returns everything needed to perform basic training"
-     set_seed(42)
-     model = RegressionModel()
-     ddp_model = deepcopy(model)
-     dset = RegressionDataset(length=80)
-     dataloader = DataLoader(dset, batch_size=16)
-     model.to(accelerator.device)
-     if sched:
-         opt = AdamW(params=model.parameters(), lr=1e-3)
-         ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
-         sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
-         ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
-     # Prepare the copy of `model`
-     if sched:
-         ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
-     else:
-         ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
-     if sched:
-         return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
-     return model, ddp_model, dataloader
-
-
- def test_noop_sync(accelerator):
-     # Test that on a single CPU or GPU the context manager does nothing
-     model, ddp_model, dataloader = get_training_setup(accelerator)
-     # Use a single batch
-     ddp_input, ddp_target = next(iter(dataloader)).values()
-     for iteration in range(3):
-         # Gather the distributed inputs and targets for the base model
-         input, target = accelerator.gather((ddp_input, ddp_target))
-         input, target = input.to(accelerator.device), target.to(accelerator.device)
-         # Perform our initial ground truth step in non "DDP"
-         step_model(model, input, target, accelerator)
-         # Do "gradient accumulation" (noop)
-         if iteration % 2 == 0:
-             # Accumulate grads locally
-             with accelerator.no_sync(ddp_model):
-                 step_model(ddp_model, ddp_input, ddp_target, accelerator)
-         else:
-             # Sync grads
-             step_model(ddp_model, ddp_input, ddp_target, accelerator)
-
-         # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
-         check_model_parameters(model, ddp_model, True, iteration)
-         for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
-             if not param.requires_grad:
-                 continue
-             assert torch.allclose(
-                 param.grad, ddp_param.grad
-             ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
-
-         # Shuffle ddp_input on each iteration
-         torch.manual_seed(1337 + iteration)
-         ddp_input = ddp_input[torch.randperm(len(ddp_input))]
-
-
- def test_distributed_sync(accelerator):
-     # Test that on a distributed setup the context manager behaves properly
-     model, ddp_model, dataloader = get_training_setup(accelerator)
-     # Use a single batch
-     ddp_input, ddp_target = next(iter(dataloader)).values()
-     for iteration in range(3):
-         # Gather the distributed inputs and targets for the base model
-         input, target = accelerator.gather((ddp_input, ddp_target))
-         input, target = input.to(accelerator.device), target.to(accelerator.device)
-         # Perform our initial ground truth step in non "DDP"
-         step_model(model, input, target, accelerator)
-         # Do "gradient accumulation"
-         if iteration % 2 == 0:
-             # Accumulate grads locally
-             with accelerator.no_sync(ddp_model):
-                 step_model(ddp_model, ddp_input, ddp_target, accelerator)
-         else:
-             # Sync grads
-             step_model(ddp_model, ddp_input, ddp_target, accelerator)
-
-         # DDP model and model should only be in sync when not (iteration % 2 == 0)
-         for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
-             if not param.requires_grad:
-                 continue
-             if iteration % 2 == 0:
-                 # Grads should not be in sync
-                 assert (
-                     torch.allclose(param.grad, ddp_param.grad) is False
-                 ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
-             else:
-                 # Grads should be in sync
-                 assert (
-                     torch.allclose(param.grad, ddp_param.grad) is True
-                 ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
-
-         # Shuffle ddp_input on each iteration
-         torch.manual_seed(1337 + iteration)
-         ddp_input = ddp_input[torch.randperm(len(ddp_input))]
-
-
- def test_distributed_sync_multiple_fwd(accelerator):
-     # Test that on a distributed setup the context manager behaves properly when used with multiple forwards followed by multiple backwards
-     model, ddp_model, dataloader = get_training_setup(accelerator)
-     # Do multiple forwards
-     losses = []
-     num_iterations = 3
-     for iteration in range(num_iterations):
-         ddp_input, ddp_target = next(iter(dataloader)).values()
-
-         # Gather the distributed inputs and targets for the base model
-         input, target = accelerator.gather((ddp_input, ddp_target))
-         input, target = input.to(accelerator.device), target.to(accelerator.device)
-
-         # Perform our initial ground truth step in non "DDP"
-         step_model(model, input, target, accelerator)
-
-         # Accumulate grads locally
-         with accelerator.no_sync(ddp_model):
-             ddp_output = ddp_model(ddp_input)
-             loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device))
-             losses.append(loss)
-
-     # Do multiple backwards and sync only at the last backward
-     for iteration in range(num_iterations):
-         loss = losses[iteration]
-
-         if iteration < num_iterations - 1:
-             # Accumulate grads locally
-             accelerator.backward(loss)
-
-             # DDP model and model should only be in sync after the last backward
-             for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
-                 if not param.requires_grad:
-                     continue
-                 # Grads should not be in sync
-                 assert (
-                     torch.allclose(param.grad, ddp_param.grad) is False
-                 ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
-
-         else:
-             # Sync grads on the last backward
-             with accelerator.trigger_sync_in_backward(ddp_model):
-                 accelerator.backward(loss)
-
-             # DDP model and model should only be in sync after the last backward
-             for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
-                 if not param.requires_grad:
-                     continue
-                 # Grads should be in sync
-                 assert (
-                     torch.allclose(param.grad, ddp_param.grad) is True
-                 ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
-
-
- def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
-     accelerator = Accelerator(
-         split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
-     )
-     # Test that the context manager behaves properly
-     model, ddp_model, dataloader = get_training_setup(accelerator)
-     for iteration, batch in enumerate(dataloader):
-         ddp_input, ddp_target = batch.values()
-         # Gather the distributed inputs and targets for the base model
-         input, target = accelerator.gather((ddp_input, ddp_target))
-         input, target = input.to(accelerator.device), target.to(accelerator.device)
-         # Perform our initial ground truth step in non "DDP"
-         step_model(model, input, target, accelerator, False)
-         # Do "gradient accumulation" under the `accumulate` wrapper
-         with accelerator.accumulate(ddp_model):
-             step_model(ddp_model, ddp_input, ddp_target, accelerator)
-
-         # DDP model and model should only be in sync when not (iteration % 2 == 0)
-         for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
-             if not param.requires_grad:
-                 continue
-             if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
-                 # Grads should be in sync
-                 assert (
-                     torch.allclose(param.grad, ddp_param.grad) is True
-                 ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
-             else:
-                 # Grads should not be in sync
-                 assert (
-                     torch.allclose(param.grad, ddp_param.grad) is False
-                 ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
-
-         # Shuffle ddp_input on each iteration
-         torch.manual_seed(1337 + iteration)
-         ddp_input = ddp_input[torch.randperm(len(ddp_input))]
-     GradientState._reset_state()
-
-
- def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
-     accelerator = Accelerator(
-         split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
-     )
-     # Test that the context manager behaves properly
-     model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
-     for iteration, batch in enumerate(dataloader):
-         ddp_input, ddp_target = batch.values()
-         # Gather the distributed inputs and targets for the base model
-         input, target = accelerator.gather((ddp_input, ddp_target))
-         input, target = input.to(accelerator.device), target.to(accelerator.device)
-         # Perform our initial ground truth step in non "DDP"
-         model.train()
-         ddp_model.train()
-         step_model(model, input, target, accelerator, False)
-         opt.step()
-
-         if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
-             if split_batches:
-                 sched.step()
-             else:
-                 for _ in range(accelerator.num_processes):
-                     sched.step()
-         opt.zero_grad()
-         # Perform gradient accumulation under the wrapper
-         with accelerator.accumulate(ddp_model):
-             step_model(ddp_model, ddp_input, ddp_target, accelerator)
-             ddp_opt.step()
-             ddp_sched.step()
-             ddp_opt.zero_grad()
-
-         # Learning rates should be the same
-         assert (
-             opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
-         ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
-         did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
-         if accelerator.num_processes > 1:
-             check_model_parameters(model, ddp_model, did_step, iteration)
-         # Shuffle ddp_input on each iteration
-         torch.manual_seed(1337 + iteration)
-     GradientState._reset_state()
-
-
- def test_dataloader_break():
-     accelerator = Accelerator()
-
-     first_dset = RegressionDataset(length=80)
-     first_dataloader = DataLoader(first_dset, batch_size=16)
-     second_dset = RegressionDataset(length=96)
-     second_dataloader = DataLoader(second_dset, batch_size=16)
-     first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
-     assert accelerator.gradient_state.active_dataloader is None
-     for iteration, _ in enumerate(first_dataloader):
-         assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
-         if iteration < len(first_dataloader) - 1:
-             assert not accelerator.gradient_state.end_of_dataloader
-             if iteration == 1:
-                 for batch_num, _ in enumerate(second_dataloader):
-                     assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
-                     if batch_num < len(second_dataloader) - 1:
-                         assert not accelerator.gradient_state.end_of_dataloader
-                     else:
-                         assert accelerator.gradient_state.end_of_dataloader
-         else:
-             assert accelerator.gradient_state.end_of_dataloader
-     assert accelerator.gradient_state.active_dataloader is None
-
-
- def main():
-     accelerator = Accelerator()
-     state = accelerator.state
-     if state.local_process_index == 0:
-         print("**Test `accumulate` gradient accumulation with dataloader break**")
-     test_dataloader_break()
-     if state.distributed_type == DistributedType.NO:
-         if state.local_process_index == 0:
-             print("**Test NOOP `no_sync` context manager**")
-         test_noop_sync(accelerator)
-     if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_CPU):
-         if state.local_process_index == 0:
-             print("**Test Distributed `no_sync` context manager**")
-         test_distributed_sync(accelerator)
-         if state.local_process_index == 0:
-             print("**Test Distributed `no_sync` context manager with multiple forwards**")
-         test_distributed_sync_multiple_fwd(accelerator)
-     if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU):
-         for split_batch in [True, False]:
-             for dispatch_batches in [True, False]:
-                 if state.local_process_index == 0:
-                     print(
-                         "**Test `accumulate` gradient accumulation, ",
-                         f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
-                     )
-                 test_gradient_accumulation(split_batch, dispatch_batches)
-
-     # Currently will break on torch 2.0 +, need to investigate why
-     if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
-         if state.local_process_index == 0:
-             print(
-                 "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
-                 "`split_batches=False`, `dispatch_batches=False`**",
-             )
-         test_gradient_accumulation_with_opt_and_scheduler()
-         if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU):
-             for split_batch in [True, False]:
-                 for dispatch_batches in [True, False]:
-                     if not split_batch and not dispatch_batches:
-                         continue
-                     if state.local_process_index == 0:
-                         print(
-                             "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
-                             f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
-                         )
-                     test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
-
-
- def _mp_fn(index):
-     # For xla_spawn (TPUs)
-     main()
-
-
- if __name__ == "__main__":
-     main()
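The deleted tests above all revolve around one pattern: suppressing DDP's gradient all-reduce on accumulation steps, then syncing on the final micro-batch. A minimal sketch of that pattern using the `accumulate` wrapper the tests target (the toy linear model and random data are placeholders, not from the original script):

```python
# Minimal sketch of the gradient-accumulation pattern exercised above.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
dataset = TensorDataset(torch.randn(64, 2), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=16)
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # Inside `accumulate`, gradients only sync every `gradient_accumulation_steps`
    # batches (and at the end of the dataloader); on the other iterations the
    # prepared optimizer skips its step and grads accumulate locally, which is
    # exactly what `test_gradient_accumulation` asserts above.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```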
src/test_utils/testing.py DELETED
@@ -1,501 +0,0 @@
-
-
- import asyncio
- import os
- import shutil
- import subprocess
- import sys
- import tempfile
- import unittest
- from contextlib import contextmanager
- from functools import partial
- from pathlib import Path
- from typing import List, Union
- from unittest import mock
-
- import torch
-
- from ..state import AcceleratorState, PartialState
- from ..utils import (
-     gather,
-     is_bnb_available,
-     is_clearml_available,
-     is_comet_ml_available,
-     is_cuda_available,
-     is_datasets_available,
-     is_deepspeed_available,
-     is_dvclive_available,
-     is_mps_available,
-     is_npu_available,
-     is_pandas_available,
-     is_tensorboard_available,
-     is_timm_available,
-     is_torch_version,
-     is_tpu_available,
-     is_transformers_available,
-     is_wandb_available,
-     is_xpu_available,
-     str_to_bool,
- )
-
-
- def get_backend():
-     if is_cuda_available():
-         return "cuda", torch.cuda.device_count()
-     elif is_mps_available():
-         return "mps", 1
-     elif is_npu_available():
-         return "npu", torch.npu.device_count()
-     elif is_xpu_available():
-         return "xpu", torch.xpu.device_count()
-     else:
-         return "cpu", 1
-
-
- torch_device, device_count = get_backend()
-
-
- def parse_flag_from_env(key, default=False):
-     try:
-         value = os.environ[key]
-     except KeyError:
-         # KEY isn't set, default to `default`.
-         _value = default
-     else:
-         # KEY is set, convert it to True or False.
-         try:
-             _value = str_to_bool(value)
-         except ValueError:
-             # More values are supported, but let's keep the message simple.
-             raise ValueError(f"If set, {key} must be yes or no.")
-     return _value
-
-
- _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
-
-
- def skip(test_case):
-     "Decorator that skips a test unconditionally"
-     return unittest.skip("Test was skipped")(test_case)
-
-
- def slow(test_case):
-     """
-     Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a
-     truthy value to run them.
-     """
-     return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
-
-
- def require_cpu(test_case):
-     """
-     Decorator marking a test that must only be run on the CPU. These tests are skipped when a GPU is available.
-     """
-     return unittest.skipUnless(torch_device == "cpu", "test requires only a CPU")(test_case)
-
-
- def require_non_cpu(test_case):
-     """
-     Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware
-     accelerator is available.
-     """
-     return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case)
-
-
- def require_cuda(test_case):
-     """
-     Decorator marking a test that requires CUDA. These tests are skipped when no GPU is available.
-     """
-     return unittest.skipUnless(is_cuda_available(), "test requires a GPU")(test_case)
-
-
- def require_xpu(test_case):
-     """
-     Decorator marking a test that requires XPU. These tests are skipped when no XPU is available.
-     """
-     return unittest.skipUnless(is_xpu_available(), "test requires an XPU")(test_case)
-
-
- def require_mps(test_case):
-     """
-     Decorator marking a test that requires the MPS backend. These tests are skipped when torch doesn't support the
-     `mps` backend.
-     """
-     return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
-
-
- def require_huggingface_suite(test_case):
-     """
-     Decorator marking a test that requires transformers and datasets. These tests are skipped when either is not
-     installed.
-     """
-     return unittest.skipUnless(
-         is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
-     )(test_case)
-
-
- def require_transformers(test_case):
-     """
-     Decorator marking a test that requires transformers. These tests are skipped when it is not installed.
-     """
-     return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case)
-
-
- def require_timm(test_case):
-     """
-     Decorator marking a test that requires timm. These tests are skipped when it is not installed.
-     """
-     return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case)
-
-
- def require_bnb(test_case):
-     """
-     Decorator marking a test that requires bitsandbytes. These tests are skipped when it is not installed.
-     """
-     return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
-
-
- def require_tpu(test_case):
-     """
-     Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.
-     """
-     return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
-
-
- def require_single_device(test_case):
-     """
-     Decorator marking a test that requires a single device. These tests are skipped when there is no hardware
-     accelerator available or the number of devices is more than one.
-     """
-     return unittest.skipUnless(torch_device != "cpu" and device_count == 1, "test requires a hardware accelerator")(
-         test_case
-     )
-
-
- def require_single_gpu(test_case):
-     """
-     Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there is no GPU
-     available or the number of GPUs is more than one.
-     """
-     return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
-
-
- def require_single_xpu(test_case):
-     """
-     Decorator marking a test that requires a single XPU. These tests are skipped when there is no XPU available or
-     the number of XPUs is more than one.
-     """
-     return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires an XPU")(test_case)
-
-
- def require_multi_device(test_case):
-     """
-     Decorator marking a test that requires a multi-device setup. These tests are skipped on a machine without
-     multiple devices.
-     """
-     return unittest.skipUnless(device_count > 1, "test requires multiple hardware accelerators")(test_case)
-
-
- def require_multi_gpu(test_case):
-     """
-     Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple
-     GPUs.
-     """
-     return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
-
-
- def require_multi_xpu(test_case):
-     """
-     Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple
-     XPUs.
-     """
-     return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
-
-
- def require_deepspeed(test_case):
-     """
-     Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't
-     installed.
-     """
-     return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
-
-
- def require_fsdp(test_case):
-     """
-     Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed.
-     """
-     return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
-
-
- def require_torch_min_version(test_case=None, version=None):
-     """
-     Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when the
-     installed torch version is less than the required one.
-     """
-     if test_case is None:
-         return partial(require_torch_min_version, version=version)
-     return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
-
-
- def require_tensorboard(test_case):
-     """
-     Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't
-     installed.
-     """
-     return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
-
-
- def require_wandb(test_case):
-     """
-     Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed.
-     """
-     return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
-
-
- def require_comet_ml(test_case):
-     """
-     Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed.
-     """
-     return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
-
-
- def require_clearml(test_case):
-     """
-     Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed.
-     """
-     return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
-
-
- def require_dvclive(test_case):
-     """
-     Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed.
-     """
-     return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case)
-
-
- def require_pandas(test_case):
-     """
-     Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed.
-     """
-     return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
-
-
- _atleast_one_tracker_available = (
-     any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
- )
-
-
- def require_trackers(test_case):
-     """
-     Decorator marking that a test requires at least one tracking library installed. These tests are skipped when
-     none are installed.
-     """
-     return unittest.skipUnless(
-         _atleast_one_tracker_available,
-         "test requires at least one tracker to be available and for `comet_ml` to not be installed",
-     )(test_case)
-
-
- class TempDirTestCase(unittest.TestCase):
-     """
-     A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
-     data at the start of a test, and then destroys it at the end of the TestCase.
-
-     Useful for when a class or API requires a single constant folder throughout its use, such as Weights and Biases.
-
-     The temporary directory location will be stored in `self.tmpdir`.
-     """
-
-     clear_on_setup = True
-
-     @classmethod
-     def setUpClass(cls):
-         "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
-         cls.tmpdir = tempfile.mkdtemp()
-
-     @classmethod
-     def tearDownClass(cls):
-         "Remove `cls.tmpdir` after the test suite has finished"
-         if os.path.exists(cls.tmpdir):
-             shutil.rmtree(cls.tmpdir)
-
-     def setUp(self):
-         "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself"
-         if self.clear_on_setup:
-             for path in Path(self.tmpdir).glob("**/*"):
-                 if path.is_file():
-                     path.unlink()
-                 elif path.is_dir():
-                     shutil.rmtree(path)
-
-
- class AccelerateTestCase(unittest.TestCase):
-     """
-     A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or
-     utilizes the `AcceleratorState` class should inherit from this to avoid silent failures due to state being
-     shared between tests.
-     """
-
-     def tearDown(self):
-         super().tearDown()
-         # Reset the state of the AcceleratorState singleton.
-         AcceleratorState._reset_state()
-         PartialState._reset_state()
-
-
- class MockingTestCase(unittest.TestCase):
-     """
-     A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the
-     behavior of a class-wide mock when defining one normally will not do.
-
-     Useful when a mock requires specific information that is only available after `TestCase.setUpClass`, such as
-     setting an environment variable with that information.
-
-     The `add_mocks` function should be run at the end of a `TestCase`'s `setUp` function, after a call to
-     `super().setUp()`, such as:
-     ```python
-     def setUp(self):
-         super().setUp()
-         mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR": "SOME_VALUE"})
-         self.add_mocks(mocks)
-     ```
-     """
-
-     def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
-         """
-         Add custom mocks for tests that should be repeated on each test. Should be called during
-         `MockingTestCase.setUp`, after `super().setUp()`.
-
-         Args:
-             mocks (`mock.Mock` or list of `mock.Mock`):
-                 Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run
-         """
-         self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
-         for m in self.mocks:
-             m.start()
-             self.addCleanup(m.stop)
-
-
- def are_the_same_tensors(tensor):
-     state = AcceleratorState()
-     tensor = tensor[None].clone().to(state.device)
-     tensors = gather(tensor).cpu()
-     tensor = tensor[0].cpu()
-     for i in range(tensors.shape[0]):
-         if not torch.equal(tensors[i], tensor):
-             return False
-     return True
-
-
- class _RunOutput:
-     def __init__(self, returncode, stdout, stderr):
-         self.returncode = returncode
-         self.stdout = stdout
-         self.stderr = stderr
-
-
- async def _read_stream(stream, callback):
-     while True:
-         line = await stream.readline()
-         if line:
-             callback(line)
-         else:
-             break
-
-
- async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
-     if echo:
-         print("\nRunning: ", " ".join(cmd))
-
-     p = await asyncio.create_subprocess_exec(
-         cmd[0],
-         *cmd[1:],
-         stdin=stdin,
-         stdout=asyncio.subprocess.PIPE,
-         stderr=asyncio.subprocess.PIPE,
-         env=env,
-     )
-
-     # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
-     # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
-     #
-     # If it starts hanging, will need to switch to the following code. The problem is that no data
-     # will be seen until it's done and if it hangs for example there will be no debug info.
-     # out, err = await p.communicate()
-     # return _RunOutput(p.returncode, out, err)
-
-     out = []
-     err = []
-
-     def tee(line, sink, pipe, label=""):
-         line = line.decode("utf-8").rstrip()
-         sink.append(line)
-         if not quiet:
-             print(label, line, file=pipe)
-
-     # XXX: the timeout doesn't seem to make any difference here
-     await asyncio.wait(
-         [
-             asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
-             asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
-         ],
-         timeout=timeout,
-     )
-     return _RunOutput(await p.wait(), out, err)
-
-
- def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
-     loop = asyncio.get_event_loop()
-     result = loop.run_until_complete(
-         _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
-     )
-
-     cmd_str = " ".join(cmd)
-     if result.returncode > 0:
-         stderr = "\n".join(result.stderr)
-         raise RuntimeError(
-             f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
-             f"The combined stderr from workers follows:\n{stderr}"
-         )
-
-     return result
-
-
- class SubprocessCallException(Exception):
-     pass
-
-
- def run_command(command: List[str], return_stdout=False, env=None):
-     """
-     Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly
-     capture if an error occurred while running `command`.
-     """
-     if env is None:
-         env = os.environ.copy()
-     try:
-         output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env)
-         if return_stdout:
-             if hasattr(output, "decode"):
-                 output = output.decode("utf-8")
-             return output
-     except subprocess.CalledProcessError as e:
-         raise SubprocessCallException(
-             f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
-         ) from e
-
-
- @contextmanager
- def assert_exception(exception_class: Exception, msg: str = None) -> bool:
-     """
-     Context manager to assert that the right `Exception` class was raised.
-
-     If `msg` is provided, will check that the message is contained in the raised exception.
-     """
-     was_ran = False
-     try:
-         yield
-         was_ran = True
-     except Exception as e:
-         assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}"
-         if msg is not None:
-             assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'"
-     if was_ran:
-         raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.")
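For reference, a hedged sketch of how these skip decorators were consumed in downstream test files (the `accelerate.test_utils.testing` import path is assumed from this file's location; the test bodies are placeholders):

```python
import unittest

# Assumed pre-removal import path, based on the deleted file's location above.
from accelerate.test_utils.testing import require_cuda, require_multi_gpu, slow


class ExampleSuite(unittest.TestCase):
    @require_cuda
    def test_needs_one_gpu(self):
        ...  # skipped automatically on CPU-only machines

    @slow
    @require_multi_gpu
    def test_needs_time_and_gpus(self):
        ...  # runs only with RUN_SLOW set truthy and more than one GPU
```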
src/test_utils/training.py DELETED
@@ -1,89 +0,0 @@
-
-
- import numpy as np
- import torch
- from torch.utils.data import DataLoader
-
- from accelerate.utils.dataclasses import DistributedType
-
-
- class RegressionDataset:
-     def __init__(self, a=2, b=3, length=64, seed=None):
-         rng = np.random.default_rng(seed)
-         self.length = length
-         self.x = rng.normal(size=(length,)).astype(np.float32)
-         self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
-
-     def __len__(self):
-         return self.length
-
-     def __getitem__(self, i):
-         return {"x": self.x[i], "y": self.y[i]}
-
-
- class RegressionModel4XPU(torch.nn.Module):
-     def __init__(self, a=0, b=0, double_output=False):
-         super().__init__()
-         self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
-         self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
-         self.first_batch = True
-
-     def forward(self, x=None):
-         if self.first_batch:
-             print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
-             self.first_batch = False
-         return x * self.a[0] + self.b[0]
-
-
- class RegressionModel(torch.nn.Module):
-     def __init__(self, a=0, b=0, double_output=False):
-         super().__init__()
-         self.a = torch.nn.Parameter(torch.tensor(a).float())
-         self.b = torch.nn.Parameter(torch.tensor(b).float())
-         self.first_batch = True
-
-     def forward(self, x=None):
-         if self.first_batch:
-             print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
-             self.first_batch = False
-         return x * self.a + self.b
-
-
- def mocked_dataloaders(accelerator, batch_size: int = 16):
-     from datasets import load_dataset
-     from transformers import AutoTokenizer
-
-     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
-     data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
-     datasets = load_dataset("csv", data_files=data_files)
-     label_list = datasets["train"].unique("label")
-
-     label_to_id = {v: i for i, v in enumerate(label_list)}
-
-     def tokenize_function(examples):
-         # max_length=None => use the model max length (it's actually the default)
-         outputs = tokenizer(
-             examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
-         )
-         if "label" in examples:
-             outputs["labels"] = [label_to_id[l] for l in examples["label"]]
-         return outputs
-
-     # Apply the method we just defined to all the examples in all the splits of the dataset
-     tokenized_datasets = datasets.map(
-         tokenize_function,
-         batched=True,
-         remove_columns=["sentence1", "sentence2", "label"],
-     )
-
-     def collate_fn(examples):
-         # On TPU it's best to pad everything to the same length or training will be very slow.
-         if accelerator.distributed_type == DistributedType.TPU:
-             return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
-         return tokenizer.pad(examples, padding="longest", return_tensors="pt")
-
-     # Instantiate dataloaders.
-     train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
-     eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
-
-     return train_dataloader, eval_dataloader
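As a quick orientation, a hedged sketch of the regression helpers in use, mirroring how the deleted `test_sync.py` above consumes them (the `accelerate.test_utils` import path is taken from that script):

```python
# Sketch: the regression helpers define a y = a * x + b fitting task that the
# sync/accumulation scripts train on deterministically (seeded data).
from torch.utils.data import DataLoader

from accelerate.test_utils import RegressionDataset, RegressionModel

dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
dataloader = DataLoader(dataset, batch_size=16)
model = RegressionModel()  # learns scalar parameters `a` and `b` from zero init

batch = next(iter(dataloader))
prediction = model(batch["x"])  # also prints model/input dtypes on the first batch
```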
src/tracking.py CHANGED
@@ -3,29 +3,6 @@
  # Expectation:
  # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}
 
- import json
- import os
- import time
- from functools import wraps
- from typing import Any, Dict, List, Optional, Union
-
- import yaml
-
- from .logging import get_logger
- from .state import PartialState
- from .utils import (
-     LoggerType,
-     is_aim_available,
-     is_clearml_available,
-     is_comet_ml_available,
-     is_dvclive_available,
-     is_mlflow_available,
-     is_tensorboard_available,
-     is_wandb_available,
-     listify,
- )
-
-
  _available_trackers = []
 
  if is_tensorboard_available():
src/utils/bnb.py CHANGED
@@ -1,31 +1,3 @@
-
-
-
- import logging
- import os
- from copy import deepcopy
- from typing import Dict, List, Optional, Union
-
- import torch
- import torch.nn as nn
-
- from accelerate.utils.imports import (
-     is_4bit_bnb_available,
-     is_8bit_bnb_available,
- )
-
- from ..big_modeling import dispatch_model, init_empty_weights
- from .dataclasses import BnbQuantizationConfig
- from .modeling import (
-     find_tied_parameters,
-     get_balanced_memory,
-     infer_auto_device_map,
-     load_checkpoint_in_model,
-     offload_weight,
-     set_module_tensor_to_device,
- )
-
-
  logger = logging.getLogger(__name__)
 
 
src/utils/constants.py CHANGED
@@ -1,8 +1,3 @@
-
-
- import operator as op
-
-
  SCALER_NAME = "scaler.pt"
  MODEL_NAME = "pytorch_model"
  SAFE_MODEL_NAME = "model"